Diffstat (limited to 'drivers/net/ethernet')
587 files changed, 24266 insertions, 9983 deletions
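Note: a large share of the driver changes below are the mechanical rename of the net_device_ops ioctl hook from .ndo_do_ioctl to .ndo_eth_ioctl; the handler signature itself is unchanged. A minimal sketch of that pattern, using a hypothetical "foo" driver (foo_open, foo_stop, foo_start_xmit and the MII passthrough are illustrative placeholders, not code from this diff):

#include <linux/netdevice.h>
#include <linux/phy.h>

static int foo_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	if (!netif_running(dev))
		return -EINVAL;
	/* typical case: forward MII/PHY ioctls to phylib */
	return phy_mii_ioctl(dev->phydev, ifr, cmd);
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_open	= foo_open,
	.ndo_stop	= foo_stop,
	.ndo_start_xmit	= foo_start_xmit,
	.ndo_eth_ioctl	= foo_ioctl,	/* was .ndo_do_ioctl before this series */
};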
diff --git a/drivers/net/ethernet/3com/3c509.c b/drivers/net/ethernet/3com/3c509.c index 96cc5fc36eb5..87c906e744fb 100644 --- a/drivers/net/ethernet/3com/3c509.c +++ b/drivers/net/ethernet/3com/3c509.c @@ -302,7 +302,6 @@ static int el3_isa_match(struct device *pdev, unsigned int ndev) return -ENOMEM; SET_NETDEV_DEV(dev, pdev); - netdev_boot_setup_check(dev); if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { free_netdev(dev); @@ -421,7 +420,6 @@ static int el3_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) return -ENOMEM; } SET_NETDEV_DEV(dev, &pdev->dev); - netdev_boot_setup_check(dev); el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_PNP); pnp_set_drvdata(pdev, dev); @@ -514,7 +512,9 @@ static int el3_common_init(struct net_device *dev) { struct el3_private *lp = netdev_priv(dev); int err; - const char *if_names[] = {"10baseT", "AUI", "undefined", "BNC"}; + static const char * const if_names[] = { + "10baseT", "AUI", "undefined", "BNC" + }; spin_lock_init(&lp->lock); @@ -588,7 +588,6 @@ static int el3_eisa_probe(struct device *device) } SET_NETDEV_DEV(dev, device); - netdev_boot_setup_check(dev); el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); eisa_set_drvdata (edev, dev); diff --git a/drivers/net/ethernet/3com/3c515.c b/drivers/net/ethernet/3com/3c515.c index 47b4215bb93b..8d90fed5d33e 100644 --- a/drivers/net/ethernet/3com/3c515.c +++ b/drivers/net/ethernet/3com/3c515.c @@ -407,7 +407,7 @@ MODULE_PARM_DESC(max_interrupt_work, "3c515 maximum events handled per interrupt /* we will need locking (and refcounting) if we ever use it for more */ static LIST_HEAD(root_corkscrew_dev); -int init_module(void) +static int corkscrew_init_module(void) { int found = 0; if (debug >= 0) @@ -416,6 +416,7 @@ int init_module(void) found++; return found ? 
0 : -ENODEV; } +module_init(corkscrew_init_module); #else struct net_device *tc515_probe(int unit) diff --git a/drivers/net/ethernet/3com/3c574_cs.c b/drivers/net/ethernet/3com/3c574_cs.c index f66e7fb9a2bb..dd4d3c48b98d 100644 --- a/drivers/net/ethernet/3com/3c574_cs.c +++ b/drivers/net/ethernet/3com/3c574_cs.c @@ -252,7 +252,7 @@ static const struct net_device_ops el3_netdev_ops = { .ndo_start_xmit = el3_start_xmit, .ndo_tx_timeout = el3_tx_timeout, .ndo_get_stats = el3_get_stats, - .ndo_do_ioctl = el3_ioctl, + .ndo_eth_ioctl = el3_ioctl, .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 7d7d3ffe25c3..17c16333a412 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1052,7 +1052,7 @@ static const struct net_device_ops boomrang_netdev_ops = { .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI - .ndo_do_ioctl = vortex_ioctl, + .ndo_eth_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, @@ -1069,7 +1069,7 @@ static const struct net_device_ops vortex_netdev_ops = { .ndo_tx_timeout = vortex_tx_timeout, .ndo_get_stats = vortex_get_stats, #ifdef CONFIG_PCI - .ndo_do_ioctl = vortex_ioctl, + .ndo_eth_ioctl = vortex_ioctl, #endif .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index a52a3740f0c9..706bd59bf645 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -34,6 +34,7 @@ config EL3 config 3C515 tristate "3c515 ISA \"Fast EtherLink\"" depends on ISA && ISA_DMA_API && !PPC32 + select NETDEV_LEGACY_INIT help If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet network card, say Y here. diff --git a/drivers/net/ethernet/8390/Kconfig b/drivers/net/ethernet/8390/Kconfig index 9f4b302fd2ce..a4130e643342 100644 --- a/drivers/net/ethernet/8390/Kconfig +++ b/drivers/net/ethernet/8390/Kconfig @@ -102,6 +102,7 @@ config MCF8390 config NE2000 tristate "NE2000/NE1000 support" depends on (ISA || (Q40 && m) || MACH_TX49XX || ATARI_ETHERNEC) + select NETDEV_LEGACY_INIT if ISA select CRC32 help If you have a network (Ethernet) card of this type, say Y here. @@ -169,6 +170,7 @@ config STNIC config ULTRA tristate "SMC Ultra support" depends on ISA + select NETDEV_LEGACY_INIT select CRC32 help If you have a network (Ethernet) card of this type, say Y here. @@ -186,6 +188,7 @@ config ULTRA config WD80x3 tristate "WD80*3 support" depends on ISA + select NETDEV_LEGACY_INIT select CRC32 help If you have a network (Ethernet) card of this type, say Y here. 
diff --git a/drivers/net/ethernet/8390/apne.c b/drivers/net/ethernet/8390/apne.c index fe6c834c422e..da1ae37a9d73 100644 --- a/drivers/net/ethernet/8390/apne.c +++ b/drivers/net/ethernet/8390/apne.c @@ -75,7 +75,6 @@ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ -struct net_device * __init apne_probe(int unit); static int apne_probe1(struct net_device *dev, int ioaddr); static void apne_reset_8390(struct net_device *dev); @@ -120,7 +119,7 @@ static u32 apne_msg_enable; module_param_named(msg_enable, apne_msg_enable, uint, 0444); MODULE_PARM_DESC(msg_enable, "Debug message level (see linux/netdevice.h for bitmap)"); -struct net_device * __init apne_probe(int unit) +static struct net_device * __init apne_probe(void) { struct net_device *dev; struct ei_device *ei_local; @@ -150,10 +149,6 @@ struct net_device * __init apne_probe(int unit) dev = alloc_ei_netdev(); if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } ei_local = netdev_priv(dev); ei_local->msg_enable = apne_msg_enable; @@ -554,12 +549,11 @@ static irqreturn_t apne_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -#ifdef MODULE static struct net_device *apne_dev; static int __init apne_module_init(void) { - apne_dev = apne_probe(-1); + apne_dev = apne_probe(); return PTR_ERR_OR_ZERO(apne_dev); } @@ -579,7 +573,6 @@ static void __exit apne_module_exit(void) } module_init(apne_module_init); module_exit(apne_module_exit); -#endif static int init_pcmcia(void) { diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c index 172947fc051a..6c6bdd5913ec 100644 --- a/drivers/net/ethernet/8390/ax88796.c +++ b/drivers/net/ethernet/8390/ax88796.c @@ -101,6 +101,13 @@ static inline struct ax_device *to_ax_dev(struct net_device *dev) return (struct ax_device *)(ei_local + 1); } +void ax_NS8390_reinit(struct net_device *dev) +{ + ax_NS8390_init(dev, 1); +} + +EXPORT_SYMBOL_GPL(ax_NS8390_reinit); + /* * ax_initial_check * @@ -635,7 +642,7 @@ static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) static const struct net_device_ops ax_netdev_ops = { .ndo_open = ax_open, .ndo_stop = ax_close, - .ndo_do_ioctl = ax_ioctl, + .ndo_eth_ioctl = ax_ioctl, .ndo_start_xmit = ax_ei_start_xmit, .ndo_tx_timeout = ax_ei_tx_timeout, diff --git a/drivers/net/ethernet/8390/axnet_cs.c b/drivers/net/ethernet/8390/axnet_cs.c index 8c321dfc7b3b..3c370e686ec3 100644 --- a/drivers/net/ethernet/8390/axnet_cs.c +++ b/drivers/net/ethernet/8390/axnet_cs.c @@ -128,7 +128,7 @@ static inline struct axnet_dev *PRIV(struct net_device *dev) static const struct net_device_ops axnet_netdev_ops = { .ndo_open = axnet_open, .ndo_stop = axnet_close, - .ndo_do_ioctl = axnet_ioctl, + .ndo_eth_ioctl = axnet_ioctl, .ndo_start_xmit = axnet_start_xmit, .ndo_tx_timeout = axnet_tx_timeout, .ndo_get_stats = get_stats, diff --git a/drivers/net/ethernet/8390/ne.c b/drivers/net/ethernet/8390/ne.c index e9756d0ea5b8..53660bc8d6ff 100644 --- a/drivers/net/ethernet/8390/ne.c +++ b/drivers/net/ethernet/8390/ne.c @@ -923,7 +923,7 @@ static void __init ne_add_devices(void) } #ifdef MODULE -int __init init_module(void) +static int __init ne_init(void) { int retval; ne_add_devices(); @@ -940,6 +940,7 @@ int __init init_module(void) ne_loop_rm_unreg(0); return retval; } +module_init(ne_init); #else /* MODULE */ static int __init ne_init(void) { @@ -951,6 +952,7 @@ static int __init ne_init(void) } module_init(ne_init); +#ifdef CONFIG_NETDEV_LEGACY_INIT struct net_device * __init 
ne_probe(int unit) { int this_dev; @@ -991,6 +993,7 @@ struct net_device * __init ne_probe(int unit) return ERR_PTR(-ENODEV); } +#endif #endif /* MODULE */ static void __exit ne_exit(void) diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c index cac036706382..96ad72abd373 100644 --- a/drivers/net/ethernet/8390/pcnet_cs.c +++ b/drivers/net/ethernet/8390/pcnet_cs.c @@ -223,7 +223,7 @@ static const struct net_device_ops pcnet_netdev_ops = { .ndo_set_config = set_config, .ndo_start_xmit = ei_start_xmit, .ndo_get_stats = ei_get_stats, - .ndo_do_ioctl = ei_ioctl, + .ndo_eth_ioctl = ei_ioctl, .ndo_set_rx_mode = ei_set_multicast_list, .ndo_tx_timeout = ei_tx_timeout, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/8390/smc-ultra.c b/drivers/net/ethernet/8390/smc-ultra.c index 1d8ed7357b7f..0890fa493f70 100644 --- a/drivers/net/ethernet/8390/smc-ultra.c +++ b/drivers/net/ethernet/8390/smc-ultra.c @@ -522,7 +522,6 @@ static void ultra_pio_input(struct net_device *dev, int count, /* We know skbuffs are padded to at least word alignment. */ insw(ioaddr + IOPD, buf, (count+1)>>1); } - static void ultra_pio_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { @@ -572,8 +571,7 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. */ -int __init -init_module(void) +static int __init ultra_init_module(void) { struct net_device *dev; int this_dev, found = 0; @@ -600,6 +598,7 @@ init_module(void) return 0; return -ENXIO; } +module_init(ultra_init_module); static void cleanup_card(struct net_device *dev) { @@ -613,8 +612,7 @@ static void cleanup_card(struct net_device *dev) iounmap(ei_status.mem); } -void __exit -cleanup_module(void) +static void __exit ultra_cleanup_module(void) { int this_dev; @@ -627,4 +625,5 @@ cleanup_module(void) } } } +module_exit(ultra_cleanup_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/wd.c b/drivers/net/ethernet/8390/wd.c index c834123560f1..263a942d81fa 100644 --- a/drivers/net/ethernet/8390/wd.c +++ b/drivers/net/ethernet/8390/wd.c @@ -519,7 +519,7 @@ MODULE_LICENSE("GPL"); /* This is set up so that only a single autoprobe takes place per call. ISA device autoprobes on a running machine are not recommended. 
*/ -int __init init_module(void) +static int __init wd_init_module(void) { struct net_device *dev; int this_dev, found = 0; @@ -548,6 +548,7 @@ int __init init_module(void) return 0; return -ENXIO; } +module_init(wd_init_module); static void cleanup_card(struct net_device *dev) { @@ -556,8 +557,7 @@ static void cleanup_card(struct net_device *dev) iounmap(ei_status.mem); } -void __exit -cleanup_module(void) +static void __exit wd_cleanup_module(void) { int this_dev; @@ -570,4 +570,5 @@ cleanup_module(void) } } } +module_exit(wd_cleanup_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/8390/xsurf100.c b/drivers/net/ethernet/8390/xsurf100.c index e2c963821ffe..fe7a74707aa4 100644 --- a/drivers/net/ethernet/8390/xsurf100.c +++ b/drivers/net/ethernet/8390/xsurf100.c @@ -22,8 +22,6 @@ #define XS100_8390_DATA_WRITE32_BASE 0x0C80 #define XS100_8390_DATA_AREA_SIZE 0x80 -#define __NS8390_init ax_NS8390_init - /* force unsigned long back to 'void __iomem *' */ #define ax_convert_addr(_a) ((void __force __iomem *)(_a)) @@ -42,10 +40,7 @@ /* Ensure we have our RCR base value */ #define AX88796_PLATFORM -static unsigned char version[] = - "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; - -#include "lib8390.c" +#include "8390.h" /* from ne.c */ #define NE_CMD EI_SHIFT(0x00) @@ -232,7 +227,7 @@ static void xs100_block_output(struct net_device *dev, int count, if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ netdev_warn(dev, "timeout waiting for Tx RDC.\n"); ei_local->reset_8390(dev); - ax_NS8390_init(dev, 1); + ax_NS8390_reinit(dev); break; } } diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig index 1cdff1dca790..d796684ec9ca 100644 --- a/drivers/net/ethernet/Kconfig +++ b/drivers/net/ethernet/Kconfig @@ -118,6 +118,7 @@ config LANTIQ_XRX200 Support for the PMAC of the Gigabit switch (GSWIP) inside the Lantiq / Intel VRX200 VDSL SoC +source "drivers/net/ethernet/litex/Kconfig" source "drivers/net/ethernet/marvell/Kconfig" source "drivers/net/ethernet/mediatek/Kconfig" source "drivers/net/ethernet/mellanox/Kconfig" diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile index cb3f9084a21b..aaa5078cd7d1 100644 --- a/drivers/net/ethernet/Makefile +++ b/drivers/net/ethernet/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_JME) += jme.o obj-$(CONFIG_KORINA) += korina.o obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o obj-$(CONFIG_LANTIQ_XRX200) += lantiq_xrx200.o +obj-$(CONFIG_NET_VENDOR_LITEX) += litex/ obj-$(CONFIG_NET_VENDOR_MARVELL) += marvell/ obj-$(CONFIG_NET_VENDOR_MEDIATEK) += mediatek/ obj-$(CONFIG_NET_VENDOR_MELLANOX) += mellanox/ diff --git a/drivers/net/ethernet/actions/Kconfig b/drivers/net/ethernet/actions/Kconfig index ccad6a3f4d6f..f630cac2ab6c 100644 --- a/drivers/net/ethernet/actions/Kconfig +++ b/drivers/net/ethernet/actions/Kconfig @@ -2,8 +2,8 @@ config NET_VENDOR_ACTIONS bool "Actions Semi devices" - default y - depends on ARCH_ACTIONS + depends on ARCH_ACTIONS || COMPILE_TEST + default ARCH_ACTIONS help If you have a network (Ethernet) card belonging to this class, say Y. 
diff --git a/drivers/net/ethernet/actions/owl-emac.c b/drivers/net/ethernet/actions/owl-emac.c index b8e771c2bc40..c4ecf4fcadf8 100644 --- a/drivers/net/ethernet/actions/owl-emac.c +++ b/drivers/net/ethernet/actions/owl-emac.c @@ -1179,8 +1179,8 @@ static int owl_emac_ndo_set_mac_addr(struct net_device *netdev, void *addr) return owl_emac_setup_frame_xmit(netdev_priv(netdev)); } -static int owl_emac_ndo_do_ioctl(struct net_device *netdev, - struct ifreq *req, int cmd) +static int owl_emac_ndo_eth_ioctl(struct net_device *netdev, + struct ifreq *req, int cmd) { if (!netif_running(netdev)) return -EINVAL; @@ -1224,7 +1224,7 @@ static const struct net_device_ops owl_emac_netdev_ops = { .ndo_set_rx_mode = owl_emac_ndo_set_rx_mode, .ndo_set_mac_address = owl_emac_ndo_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = owl_emac_ndo_do_ioctl, + .ndo_eth_ioctl = owl_emac_ndo_eth_ioctl, .ndo_tx_timeout = owl_emac_ndo_tx_timeout, .ndo_get_stats = owl_emac_ndo_get_stats, }; diff --git a/drivers/net/ethernet/adaptec/starfire.c b/drivers/net/ethernet/adaptec/starfire.c index 7965e5e3c985..e0f6cc910bd2 100644 --- a/drivers/net/ethernet/adaptec/starfire.c +++ b/drivers/net/ethernet/adaptec/starfire.c @@ -625,7 +625,7 @@ static const struct net_device_ops netdev_ops = { .ndo_tx_timeout = tx_timeout, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef VLAN_SUPPORT diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c index 41f8821f792d..920633161174 100644 --- a/drivers/net/ethernet/agere/et131x.c +++ b/drivers/net/ethernet/agere/et131x.c @@ -3882,7 +3882,7 @@ static const struct net_device_ops et131x_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_get_stats = et131x_stats, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, }; static int et131x_pci_setup(struct pci_dev *pdev, diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index f99ae317c188..037baea1c738 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -774,7 +774,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_start_xmit = emac_start_xmit, .ndo_tx_timeout = emac_timeout, .ndo_set_rx_mode = emac_set_rx_mode, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = emac_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 27dae632efcb..13e745cf3781 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -357,7 +357,9 @@ static int ena_get_link_ksettings(struct net_device *netdev, } static int ena_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; @@ -402,7 +404,9 @@ static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter * } static int ena_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce 
*coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ena_adapter *adapter = netdev_priv(net_dev); struct ena_com_dev *ena_dev = adapter->ena_dev; diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig index d0b0609bbe23..4786f0504691 100644 --- a/drivers/net/ethernet/amd/Kconfig +++ b/drivers/net/ethernet/amd/Kconfig @@ -46,6 +46,7 @@ config AMD8111_ETH config LANCE tristate "AMD LANCE and PCnet (AT1500 and NE2100) support" depends on ISA && ISA_DMA_API && !ARM && !PPC32 + select NETDEV_LEGACY_INIT help If you have a network (Ethernet) card of this type, say Y here. Some LinkSys cards are of this type. @@ -132,6 +133,7 @@ config PCMCIA_NMCLAN config NI65 tristate "NI6510 support" depends on ISA && ISA_DMA_API && !ARM && !PPC32 + select NETDEV_LEGACY_INIT help If you have a network (Ethernet) card of this type, say Y here. @@ -168,11 +170,11 @@ config AMD_XGBE tristate "AMD 10GbE Ethernet driver" depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM depends on X86 || ARM64 || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL select BITREVERSE select CRC32 select PHYLIB select AMD_XGBE_HAVE_ECC if X86 - imply PTP_1588_CLOCK help This driver supports the AMD 10GbE Ethernet device found on an AMD SoC. diff --git a/drivers/net/ethernet/amd/amd8111e.c b/drivers/net/ethernet/amd/amd8111e.c index 9cac5aa75a73..92e4246dc359 100644 --- a/drivers/net/ethernet/amd/amd8111e.c +++ b/drivers/net/ethernet/amd/amd8111e.c @@ -1729,7 +1729,7 @@ static const struct net_device_ops amd8111e_netdev_ops = { .ndo_set_rx_mode = amd8111e_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = amd8111e_set_mac_address, - .ndo_do_ioctl = amd8111e_ioctl, + .ndo_eth_ioctl = amd8111e_ioctl, .ndo_change_mtu = amd8111e_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = amd8111e_poll, diff --git a/drivers/net/ethernet/amd/atarilance.c b/drivers/net/ethernet/amd/atarilance.c index 36f54d13a2eb..9d2f49fd945e 100644 --- a/drivers/net/ethernet/amd/atarilance.c +++ b/drivers/net/ethernet/amd/atarilance.c @@ -367,7 +367,7 @@ static void *slow_memcpy( void *dst, const void *src, size_t len ) } -struct net_device * __init atarilance_probe(int unit) +struct net_device * __init atarilance_probe(void) { int i; static int found; @@ -382,10 +382,6 @@ struct net_device * __init atarilance_probe(int unit) dev = alloc_etherdev(sizeof(struct lance_private)); if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } for( i = 0; i < N_LANCE_ADDR; ++i ) { if (lance_probe1( dev, &lance_addr_list[i] )) { @@ -1137,13 +1133,11 @@ static int lance_set_mac_address( struct net_device *dev, void *addr ) return 0; } - -#ifdef MODULE static struct net_device *atarilance_dev; static int __init atarilance_module_init(void) { - atarilance_dev = atarilance_probe(-1); + atarilance_dev = atarilance_probe(); return PTR_ERR_OR_ZERO(atarilance_dev); } @@ -1155,4 +1149,3 @@ static void __exit atarilance_module_exit(void) } module_init(atarilance_module_init); module_exit(atarilance_module_exit); -#endif /* MODULE */ diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 19e195420e24..9c1636222b99 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -1051,7 +1051,7 @@ static const struct net_device_ops au1000_netdev_ops = { .ndo_stop = au1000_close, .ndo_start_xmit = au1000_tx, 
.ndo_set_rx_mode = au1000_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_tx_timeout = au1000_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c index 2178e6b89dbd..945bf1d87507 100644 --- a/drivers/net/ethernet/amd/lance.c +++ b/drivers/net/ethernet/amd/lance.c @@ -327,7 +327,7 @@ MODULE_PARM_DESC(dma, "LANCE/PCnet ISA DMA channel (ignored for some devices)"); MODULE_PARM_DESC(irq, "LANCE/PCnet IRQ number (ignored for some devices)"); MODULE_PARM_DESC(lance_debug, "LANCE/PCnet debug level (0-7)"); -int __init init_module(void) +static int __init lance_init_module(void) { struct net_device *dev; int this_dev, found = 0; @@ -356,6 +356,7 @@ int __init init_module(void) return 0; return -ENXIO; } +module_init(lance_init_module); static void cleanup_card(struct net_device *dev) { @@ -368,7 +369,7 @@ static void cleanup_card(struct net_device *dev) kfree(lp); } -void __exit cleanup_module(void) +static void __exit lance_cleanup_module(void) { int this_dev; @@ -381,6 +382,7 @@ void __exit cleanup_module(void) } } } +module_exit(lance_cleanup_module); #endif /* MODULE */ MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/mvme147.c b/drivers/net/ethernet/amd/mvme147.c index 3f2e4cdd0b83..da97fccea9ea 100644 --- a/drivers/net/ethernet/amd/mvme147.c +++ b/drivers/net/ethernet/amd/mvme147.c @@ -68,7 +68,7 @@ static const struct net_device_ops lance_netdev_ops = { }; /* Initialise the one and only on-board 7990 */ -struct net_device * __init mvme147lance_probe(int unit) +static struct net_device * __init mvme147lance_probe(void) { struct net_device *dev; static int called; @@ -86,9 +86,6 @@ struct net_device * __init mvme147lance_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) - sprintf(dev->name, "eth%d", unit); - /* Fill the dev fields */ dev->base_addr = (unsigned long)MVME147_LANCE_BASE; dev->netdev_ops = &lance_netdev_ops; @@ -179,22 +176,21 @@ static int m147lance_close(struct net_device *dev) return 0; } -#ifdef MODULE MODULE_LICENSE("GPL"); static struct net_device *dev_mvme147_lance; -int __init init_module(void) +static int __init m147lance_init(void) { - dev_mvme147_lance = mvme147lance_probe(-1); + dev_mvme147_lance = mvme147lance_probe(); return PTR_ERR_OR_ZERO(dev_mvme147_lance); } +module_init(m147lance_init); -void __exit cleanup_module(void) +static void __exit m147lance_exit(void) { struct m147lance_private *lp = netdev_priv(dev_mvme147_lance); unregister_netdev(dev_mvme147_lance); free_pages(lp->ram, 3); free_netdev(dev_mvme147_lance); } - -#endif /* MODULE */ +module_exit(m147lance_exit); diff --git a/drivers/net/ethernet/amd/ni65.c b/drivers/net/ethernet/amd/ni65.c index 5c1cfb0c4a42..b5df7ad5a83f 100644 --- a/drivers/net/ethernet/amd/ni65.c +++ b/drivers/net/ethernet/amd/ni65.c @@ -1230,18 +1230,20 @@ MODULE_PARM_DESC(irq, "ni6510 IRQ number (ignored for some cards)"); MODULE_PARM_DESC(io, "ni6510 I/O base address"); MODULE_PARM_DESC(dma, "ni6510 ISA DMA channel (ignored for some cards)"); -int __init init_module(void) +static int __init ni65_init_module(void) { dev_ni65 = ni65_probe(-1); return PTR_ERR_OR_ZERO(dev_ni65); } +module_init(ni65_init_module); -void __exit cleanup_module(void) +static void __exit ni65_cleanup_module(void) { unregister_netdev(dev_ni65); cleanup_card(dev_ni65); free_netdev(dev_ni65); } +module_exit(ni65_cleanup_module); #endif /* MODULE */ 
MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/amd/pcnet32.c b/drivers/net/ethernet/amd/pcnet32.c index 4100ab07e6b7..70d76fdb9f56 100644 --- a/drivers/net/ethernet/amd/pcnet32.c +++ b/drivers/net/ethernet/amd/pcnet32.c @@ -1572,7 +1572,7 @@ static const struct net_device_ops pcnet32_netdev_ops = { .ndo_tx_timeout = pcnet32_tx_timeout, .ndo_get_stats = pcnet32_get_stats, .ndo_set_rx_mode = pcnet32_set_multicast_list, - .ndo_do_ioctl = pcnet32_ioctl, + .ndo_eth_ioctl = pcnet32_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/amd/sun3lance.c b/drivers/net/ethernet/amd/sun3lance.c index f8d7a9387a56..4a845bc071b2 100644 --- a/drivers/net/ethernet/amd/sun3lance.c +++ b/drivers/net/ethernet/amd/sun3lance.c @@ -245,7 +245,7 @@ static void set_multicast_list( struct net_device *dev ); /************************* End of Prototypes **************************/ -struct net_device * __init sun3lance_probe(int unit) +static struct net_device * __init sun3lance_probe(void) { struct net_device *dev; static int found; @@ -272,10 +272,6 @@ struct net_device * __init sun3lance_probe(int unit) dev = alloc_etherdev(sizeof(struct lance_private)); if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } if (!lance_probe(dev)) goto out; @@ -924,17 +920,16 @@ static void set_multicast_list( struct net_device *dev ) } -#ifdef MODULE - static struct net_device *sun3lance_dev; -int __init init_module(void) +static int __init sun3lance_init(void) { - sun3lance_dev = sun3lance_probe(-1); + sun3lance_dev = sun3lance_probe(); return PTR_ERR_OR_ZERO(sun3lance_dev); } +module_init(sun3lance_init); -void __exit cleanup_module(void) +static void __exit sun3lance_cleanup(void) { unregister_netdev(sun3lance_dev); #ifdef CONFIG_SUN3 @@ -942,6 +937,4 @@ void __exit cleanup_module(void) #endif free_netdev(sun3lance_dev); } - -#endif /* MODULE */ - +module_exit(sun3lance_cleanup); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 4f714f874c4f..17a585adfb49 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2284,7 +2284,7 @@ static const struct net_device_ops xgbe_netdev_ops = { .ndo_set_rx_mode = xgbe_set_rx_mode, .ndo_set_mac_address = xgbe_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = xgbe_ioctl, + .ndo_eth_ioctl = xgbe_ioctl, .ndo_change_mtu = xgbe_change_mtu, .ndo_tx_timeout = xgbe_tx_timeout, .ndo_get_stats64 = xgbe_get_stats64, diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c index 61f39a0e04f9..bafc51c34e0b 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c @@ -428,7 +428,9 @@ static void xgbe_set_msglevel(struct net_device *netdev, u32 msglevel) } static int xgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xgbe_prv_data *pdata = netdev_priv(netdev); @@ -443,7 +445,9 @@ static int xgbe_get_coalesce(struct net_device *netdev, } static int xgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xgbe_prv_data *pdata = 
netdev_priv(netdev); struct xgbe_hw_if *hw_if = &pdata->hw_if; diff --git a/drivers/net/ethernet/apm/xgene-v2/main.c b/drivers/net/ethernet/apm/xgene-v2/main.c index 860c18fb7aae..80399c8980bd 100644 --- a/drivers/net/ethernet/apm/xgene-v2/main.c +++ b/drivers/net/ethernet/apm/xgene-v2/main.c @@ -677,11 +677,13 @@ static int xge_probe(struct platform_device *pdev) ret = register_netdev(ndev); if (ret) { netdev_err(ndev, "Failed to register netdev\n"); - goto err; + goto err_mdio_remove; } return 0; +err_mdio_remove: + xge_mdio_remove(ndev); err: free_netdev(ndev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index de2a9348bc3f..a9ef0544e30f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -547,7 +547,9 @@ static int aq_ethtool_set_rxnfc(struct net_device *ndev, } static int aq_ethtool_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; @@ -571,7 +573,9 @@ static int aq_ethtool_get_coalesce(struct net_device *ndev, } static int aq_ethtool_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct aq_nic_s *aq_nic = netdev_priv(ndev); struct aq_nic_cfg_s *cfg; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 4af0cd9530de..e22935ce9573 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -421,7 +421,7 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_change_mtu = aq_ndev_change_mtu, .ndo_set_mac_address = aq_ndev_set_mac_address, .ndo_set_features = aq_ndev_set_features, - .ndo_do_ioctl = aq_ndev_ioctl, + .ndo_eth_ioctl = aq_ndev_ioctl, .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, .ndo_setup_tc = aq_ndo_setup_tc, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 59253846e885..dee9ff74d6d6 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -119,16 +119,10 @@ static int aq_pci_func_init(struct pci_dev *pdev) { int err; - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); + if (err) + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); - if (!err) - err = pci_set_consistent_dma_mask(pdev, - DMA_BIT_MASK(32)); - } - if (err != 0) { err = -ENOSR; goto err_exit; } @@ -417,6 +411,9 @@ static int atl_resume_common(struct device *dev, bool deep) pci_restore_state(pdev); if (deep) { + /* Reinitialize Nic/Vecs objects */ + aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol); + ret = aq_nic_init(nic); if (ret) goto err_exit; diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index 67b8113a2b53..38c288ec9059 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -844,7 +844,7 @@ static const struct 
net_device_ops arc_emac_netdev_ops = { .ndo_set_mac_address = arc_emac_set_address, .ndo_get_stats = arc_emac_stats, .ndo_set_rx_mode = arc_emac_set_rx_mode, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = arc_emac_poll_controller, #endif diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index 1ba81b1eb6fd..02ae98aabf91 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1851,7 +1851,7 @@ static const struct net_device_ops ag71xx_netdev_ops = { .ndo_open = ag71xx_open, .ndo_stop = ag71xx_stop, .ndo_start_xmit = ag71xx_hard_start_xmit, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = ag71xx_tx_timeout, .ndo_change_mtu = ag71xx_change_mtu, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index 11ef1fbe7aee..4ea157efca86 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -1701,7 +1701,7 @@ static const struct net_device_ops alx_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = alx_set_mac_address, .ndo_change_mtu = alx_change_mtu, - .ndo_do_ioctl = alx_ioctl, + .ndo_eth_ioctl = alx_ioctl, .ndo_tx_timeout = alx_tx_timeout, .ndo_fix_features = alx_fix_features, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 1c6246a5dc22..3b51b172b317 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -2609,7 +2609,7 @@ static const struct net_device_ops atl1c_netdev_ops = { .ndo_change_mtu = atl1c_change_mtu, .ndo_fix_features = atl1c_fix_features, .ndo_set_features = atl1c_set_features, - .ndo_do_ioctl = atl1c_ioctl, + .ndo_eth_ioctl = atl1c_ioctl, .ndo_tx_timeout = atl1c_tx_timeout, .ndo_get_stats = atl1c_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 2eb0a2ab69f6..753973ac922e 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -2247,7 +2247,7 @@ static const struct net_device_ops atl1e_netdev_ops = { .ndo_fix_features = atl1e_fix_features, .ndo_set_features = atl1e_set_features, .ndo_change_mtu = atl1e_change_mtu, - .ndo_do_ioctl = atl1e_ioctl, + .ndo_eth_ioctl = atl1e_ioctl, .ndo_tx_timeout = atl1e_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1e_netpoll, diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c index c67201a13cf5..68f6c0bbd945 100644 --- a/drivers/net/ethernet/atheros/atlx/atl1.c +++ b/drivers/net/ethernet/atheros/atlx/atl1.c @@ -2885,7 +2885,7 @@ static const struct net_device_ops atl1_netdev_ops = { .ndo_change_mtu = atl1_change_mtu, .ndo_fix_features = atlx_fix_features, .ndo_set_features = atlx_set_features, - .ndo_do_ioctl = atlx_ioctl, + .ndo_eth_ioctl = atlx_ioctl, .ndo_tx_timeout = atlx_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl1_poll_controller, diff --git a/drivers/net/ethernet/atheros/atlx/atl2.c b/drivers/net/ethernet/atheros/atlx/atl2.c index 0cc0db04c27d..b69298ddb647 100644 --- a/drivers/net/ethernet/atheros/atlx/atl2.c +++ b/drivers/net/ethernet/atheros/atlx/atl2.c @@ 
-1293,7 +1293,7 @@ static const struct net_device_ops atl2_netdev_ops = { .ndo_change_mtu = atl2_change_mtu, .ndo_fix_features = atl2_fix_features, .ndo_set_features = atl2_set_features, - .ndo_do_ioctl = atl2_ioctl, + .ndo_eth_ioctl = atl2_ioctl, .ndo_tx_timeout = atl2_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = atl2_poll_controller, diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index 1a02ca600b71..56e0fb07aec7 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig @@ -122,8 +122,8 @@ config SB1250_MAC config TIGON3 tristate "Broadcom Tigon3 support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select PHYLIB - imply PTP_1588_CLOCK help This driver supports Broadcom Tigon3 based gigabit Ethernet cards. @@ -140,7 +140,7 @@ config TIGON3_HWMON config BNX2X tristate "Broadcom NetXtremeII 10Gb support" depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select ZLIB_INFLATE select LIBCRC32C @@ -206,7 +206,7 @@ config SYSTEMPORT config BNXT tristate "Broadcom NetXtreme-C/E support" depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select LIBCRC32C select NET_DEVLINK diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index ad2655efe423..fa784953c601 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2198,7 +2198,7 @@ static const struct net_device_ops b44_netdev_ops = { .ndo_set_rx_mode = b44_set_rx_mode, .ndo_set_mac_address = b44_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = b44_ioctl, + .ndo_eth_ioctl = b44_ioctl, .ndo_tx_timeout = b44_tx_timeout, .ndo_change_mtu = b44_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 977f097fc7bf..d56886300ecf 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -1699,7 +1699,7 @@ static const struct net_device_ops bcm_enet_ops = { .ndo_start_xmit = bcm_enet_start_xmit, .ndo_set_mac_address = bcm_enet_set_mac_address, .ndo_set_rx_mode = bcm_enet_set_multicast_list, - .ndo_do_ioctl = bcm_enet_ioctl, + .ndo_eth_ioctl = bcm_enet_ioctl, .ndo_change_mtu = bcm_enet_change_mtu, }; @@ -2446,7 +2446,7 @@ static const struct net_device_ops bcm_enetsw_ops = { .ndo_stop = bcm_enetsw_stop, .ndo_start_xmit = bcm_enet_start_xmit, .ndo_change_mtu = bcm_enet_change_mtu, - .ndo_do_ioctl = bcm_enetsw_ioctl, + .ndo_eth_ioctl = bcm_enetsw_ioctl, }; @@ -2649,7 +2649,6 @@ static int bcm_enetsw_probe(struct platform_device *pdev) if (!res_mem || irq_rx < 0) return -ENODEV; - ret = 0; dev = alloc_etherdev(sizeof(*priv)); if (!dev) return -ENOMEM; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index d9f0f0df8f7b..7fa1b695400d 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -607,7 +607,9 @@ static void bcm_sysport_set_tx_coalesce(struct bcm_sysport_tx_ring *ring, } static int bcm_sysport_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcm_sysport_priv *priv = netdev_priv(dev); u32 reg; @@ -627,7 +629,9 @@ static int bcm_sysport_get_coalesce(struct net_device *dev, } static 
int bcm_sysport_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcm_sysport_priv *priv = netdev_priv(dev); struct dim_cq_moder moder; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 075f6e146b29..fe4d99abd548 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1263,7 +1263,7 @@ static const struct net_device_ops bgmac_netdev_ops = { .ndo_set_rx_mode = bgmac_set_rx_mode, .ndo_set_mac_address = bgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = bgmac_change_mtu, }; diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index bee6cfad9fc6..a705e2615307 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -2730,7 +2730,7 @@ bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf if (!page) return -ENOMEM; mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { __free_page(page); return -EIO; @@ -2753,7 +2753,7 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index) return; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping), - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); __free_page(page); rx_pg->page = NULL; @@ -2775,7 +2775,7 @@ bnx2_alloc_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gf mapping = dma_map_single(&bp->pdev->dev, get_l2_fhdr(data), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { kfree(data); return -EIO; @@ -2881,7 +2881,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) } dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); tx_buf->skb = NULL; last = tx_buf->nr_frags; @@ -2895,7 +2895,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } sw_cons = BNX2_NEXT_TX_BD(sw_cons); @@ -3003,7 +3003,7 @@ bnx2_reuse_rx_data(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, dma_sync_single_for_device(&bp->pdev->dev, dma_unmap_addr(cons_rx_buf, mapping), - BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE); + BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, DMA_FROM_DEVICE); rxr->rx_prod_bseq += bp->rx_buf_use_size; @@ -3044,7 +3044,7 @@ error: } dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); skb = build_skb(data, 0); if (!skb) { kfree(data); @@ -3110,7 +3110,7 @@ error: } dma_unmap_page(&bp->pdev->dev, mapping_old, - PAGE_SIZE, PCI_DMA_FROMDEVICE); + PAGE_SIZE, DMA_FROM_DEVICE); frag_size -= frag_len; skb->data_len += frag_len; @@ -3180,7 +3180,7 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget) dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr, BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); next_ring_idx = BNX2_RX_RING_IDX(BNX2_NEXT_RX_BD(sw_cons)); next_rx_buf = &rxr->rx_buf_ring[next_ring_idx]; @@ -5449,7 +5449,7 @@ bnx2_free_tx_skbs(struct 
bnx2 *bp) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); tx_buf->skb = NULL; @@ -5460,7 +5460,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp) dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[k]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@ -5491,7 +5491,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp) dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), bp->rx_buf_use_size, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); rx_buf->data = NULL; @@ -5843,7 +5843,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) packet[i] = (unsigned char) (i & 0xff); map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; @@ -5882,7 +5882,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) udelay(5); - dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); dev_kfree_skb(skb); if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod) @@ -5901,7 +5901,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode) dma_sync_single_for_cpu(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping), - bp->rx_buf_use_size, PCI_DMA_FROMDEVICE); + bp->rx_buf_use_size, DMA_FROM_DEVICE); if (rx_hdr->l2_fhdr_status & (L2_FHDR_ERRORS_BAD_CRC | @@ -6660,7 +6660,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev) } else mss = 0; - mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&bp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, mapping)) { dev_kfree_skb_any(skb); return NETDEV_TX_OK; @@ -6741,7 +6742,7 @@ dma_error: tx_buf = &txr->tx_buf_ring[ring_prod]; tx_buf->skb = NULL; dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); /* unmap remaining mapped pages */ for (i = 0; i < last_frag; i++) { @@ -6750,7 +6751,7 @@ dma_error: tx_buf = &txr->tx_buf_ring[ring_prod]; dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } dev_kfree_skb_any(skb); @@ -7241,8 +7242,10 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, return rc; } -static int -bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +static int bnx2_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -7263,8 +7266,10 @@ bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) return 0; } -static int -bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +static int bnx2_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2 *bp = netdev_priv(dev); @@ -8041,21 +8046,16 @@ bnx2_read_vpd_fw_ver(struct bnx2 *bp) #define BNX2_VPD_LEN 128 #define BNX2_MAX_VER_SLEN 30 - data = kmalloc(256, GFP_KERNEL); + data = kmalloc(BNX2_VPD_LEN, GFP_KERNEL); if (!data) return; - rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN, - BNX2_VPD_LEN); + rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data, BNX2_VPD_LEN); if (rc) goto vpd_done; - 
for (i = 0; i < BNX2_VPD_LEN; i += 4) { - data[i] = data[i + BNX2_VPD_LEN + 3]; - data[i + 1] = data[i + BNX2_VPD_LEN + 2]; - data[i + 2] = data[i + BNX2_VPD_LEN + 1]; - data[i + 3] = data[i + BNX2_VPD_LEN]; - } + for (i = 0; i < BNX2_VPD_LEN; i += 4) + swab32s((u32 *)&data[i]); i = pci_vpd_find_tag(data, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA); if (i < 0) @@ -8224,15 +8224,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev) persist_dma_mask = dma_mask = DMA_BIT_MASK(64); /* Configure DMA attributes. */ - if (pci_set_dma_mask(pdev, dma_mask) == 0) { + if (dma_set_mask(&pdev->dev, dma_mask) == 0) { dev->features |= NETIF_F_HIGHDMA; - rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask); + rc = dma_set_coherent_mask(&pdev->dev, persist_dma_mask); if (rc) { dev_err(&pdev->dev, "pci_set_consistent_dma_mask failed, aborting\n"); goto err_out_unmap; } - } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + } else if ((rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { dev_err(&pdev->dev, "System does not support DMA, aborting\n"); goto err_out_unmap; } @@ -8546,7 +8546,7 @@ static const struct net_device_ops bnx2_netdev_ops = { .ndo_stop = bnx2_close, .ndo_get_stats64 = bnx2_get_stats64, .ndo_set_rx_mode = bnx2_set_rx_mode, - .ndo_do_ioctl = bnx2_ioctl, + .ndo_eth_ioctl = bnx2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnx2_change_mac_addr, .ndo_change_mtu = bnx2_change_mtu, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index 32245bbe88a8..472a3a478038 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1878,7 +1878,9 @@ static int bnx2x_set_eeprom(struct net_device *dev, } static int bnx2x_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2x *bp = netdev_priv(dev); @@ -1891,7 +1893,9 @@ static int bnx2x_get_coalesce(struct net_device *dev, } static int bnx2x_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnx2x *bp = netdev_priv(dev); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 2acbc73dcd18..6d98134913cd 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -13048,7 +13048,7 @@ static const struct net_device_ops bnx2x_netdev_ops = { .ndo_set_rx_mode = bnx2x_set_rx_mode, .ndo_set_mac_address = bnx2x_change_mac_addr, .ndo_validate_addr = bnx2x_validate_addr, - .ndo_do_ioctl = bnx2x_ioctl, + .ndo_eth_ioctl = bnx2x_ioctl, .ndo_change_mtu = bnx2x_change_mtu, .ndo_fix_features = bnx2x_fix_features, .ndo_set_features = bnx2x_set_features, diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c index 27943b0446c2..f255fd0b16db 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c @@ -1858,7 +1858,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) { int i; int first_queue_query_index, num_queues_req; - dma_addr_t cur_data_offset; struct stats_query_entry *cur_query_entry; u8 stats_count = 0; bool is_fcoe = false; @@ -1879,10 +1878,6 @@ void 
bnx2x_iov_adjust_stats_req(struct bnx2x *bp) BNX2X_NUM_ETH_QUEUES(bp), is_fcoe, first_queue_query_index, first_queue_query_index + num_queues_req); - cur_data_offset = bp->fw_stats_data_mapping + - offsetof(struct bnx2x_fw_stats_data, queue_stats) + - num_queues_req * sizeof(struct per_queue_stats); - cur_query_entry = &bp->fw_stats_req-> query[first_queue_query_index + num_queues_req]; @@ -1933,7 +1928,6 @@ void bnx2x_iov_adjust_stats_req(struct bnx2x *bp) cur_query_entry->funcID, j, cur_query_entry->index); cur_query_entry++; - cur_data_offset += sizeof(struct per_queue_stats); stats_count++; /* all stats are coalesced to the leading queue */ diff --git a/drivers/net/ethernet/broadcom/bnxt/Makefile b/drivers/net/ethernet/broadcom/bnxt/Makefile index 2b8ae687b3c1..c6ef7ec2c115 100644 --- a/drivers/net/ethernet/broadcom/bnxt/Makefile +++ b/drivers/net/ethernet/broadcom/bnxt/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_BNXT) += bnxt_en.o -bnxt_en-y := bnxt.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o +bnxt_en-y := bnxt.o bnxt_hwrm.o bnxt_sriov.o bnxt_ethtool.o bnxt_dcb.o bnxt_ulp.o bnxt_xdp.o bnxt_ptp.o bnxt_vfr.o bnxt_devlink.o bnxt_dim.o bnxt_en-$(CONFIG_BNXT_FLOWER_OFFLOAD) += bnxt_tc.o bnxt_en-$(CONFIG_DEBUG_FS) += bnxt_debugfs.o diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 8a97640cdfe7..627f85ee3922 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -60,6 +60,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_ethtool.h" @@ -276,8 +277,11 @@ static const u16 bnxt_async_events_arr[] = { ASYNC_EVENT_CMPL_EVENT_ID_RESET_NOTIFY, ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY, ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION, + ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE, ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG, ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST, + ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP, + ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT, }; static struct workqueue_struct *bnxt_pf_wq; @@ -670,7 +674,7 @@ tx_dma_error: prod = txr->tx_prod; tx_buf = &txr->tx_buf_ring[prod]; dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); prod = NEXT_TX(prod); /* unmap remaining mapped pages */ @@ -679,7 +683,7 @@ tx_dma_error: tx_buf = &txr->tx_buf_ring[prod]; dma_unmap_page(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } tx_free: @@ -718,7 +722,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) } dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_headlen(skb), PCI_DMA_TODEVICE); + skb_headlen(skb), DMA_TO_DEVICE); last = tx_buf->nr_frags; for (j = 0; j < last; j++) { @@ -728,7 +732,7 @@ static void bnxt_tx_int(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) &pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_frag_size(&skb_shinfo(skb)->frags[j]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { if (bp->flags & BNXT_FLAG_CHIP_P5) { @@ -901,7 +905,7 @@ static inline int bnxt_alloc_rx_page(struct bnxt *bp, } mapping = dma_map_page_attrs(&pdev->dev, page, offset, - BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, 
DMA_ATTR_WEAK_ORDERING); if (dma_mapping_error(&pdev->dev, mapping)) { __free_page(page); @@ -1141,7 +1145,7 @@ static struct sk_buff *bnxt_rx_pages(struct bnxt *bp, } dma_unmap_page_attrs(&pdev->dev, mapping, BNXT_RX_PAGE_SIZE, - PCI_DMA_FROMDEVICE, + DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); skb->data_len += frag_len; @@ -1649,6 +1653,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_copy_skb(bnapi, data_ptr, len, mapping); if (!skb) { bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } } else { @@ -1658,6 +1663,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, new_data = __bnxt_alloc_rx_data(bp, &new_mapping, GFP_ATOMIC); if (!new_data) { bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } @@ -1673,6 +1679,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if (!skb) { kfree(data); bnxt_abort_tpa(cpr, idx, agg_bufs); + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } skb_reserve(skb, bp->rx_offset); @@ -1683,6 +1690,7 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, skb = bnxt_rx_pages(bp, cpr, skb, idx, agg_bufs, true); if (!skb) { /* Page reuse already handled by bnxt_rx_pages(). */ + cpr->sw_stats.rx.rx_oom_discards += 1; return NULL; } } @@ -1886,6 +1894,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) bnxt_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs, false); + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -1899,6 +1908,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, skb = bp->rx_skb_func(bp, rxr, cons, data, data_ptr, dma_addr, payload | len); if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -1907,6 +1917,7 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, if (agg_bufs) { skb = bnxt_rx_pages(bp, cpr, skb, cp_cons, agg_bufs, false); if (!skb) { + cpr->sw_stats.rx.rx_oom_discards += 1; rc = -ENOMEM; goto next_rx; } @@ -2001,6 +2012,7 @@ static int bnxt_force_rx_discard(struct bnxt *bp, struct rx_cmp *rxcmp; u16 cp_cons; u8 cmp_type; + int rc; cp_cons = RING_CMP(tmp_raw_cons); rxcmp = (struct rx_cmp *) @@ -2029,7 +2041,10 @@ static int bnxt_force_rx_discard(struct bnxt *bp, tpa_end1->rx_tpa_end_cmp_errors_v2 |= cpu_to_le32(RX_TPA_END_CMP_ERRORS); } - return bnxt_rx_pkt(bp, cpr, raw_cons, event); + rc = bnxt_rx_pkt(bp, cpr, raw_cons, event); + if (rc && rc != -EBUSY) + cpr->sw_stats.rx.rx_netpoll_discards += 1; + return rc; } u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx) @@ -2074,6 +2089,19 @@ static u16 bnxt_agg_ring_id_to_grp_idx(struct bnxt *bp, u16 ring_id) return INVALID_HW_RING_ID; } +static void bnxt_event_error_report(struct bnxt *bp, u32 data1, u32 data2) +{ + switch (BNXT_EVENT_ERROR_REPORT_TYPE(data1)) { + case ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_INVALID_SIGNAL: + netdev_err(bp->dev, "1PPS: Received invalid signal on pin%lu from the external source. 
Please fix the signal and reconfigure the pin\n", + BNXT_EVENT_INVALID_SIGNAL_DATA(data2)); + break; + default: + netdev_err(bp->dev, "FW reported unknown error type\n"); + break; + } +} + #define BNXT_GET_EVENT_PORT(data) \ ((data) & \ ASYNC_EVENT_CMPL_PORT_CONN_NOT_ALLOWED_EVENT_DATA1_PORT_ID_MASK) @@ -2234,6 +2262,20 @@ static int bnxt_async_event_process(struct bnxt *bp, } goto async_event_process_exit; } + case ASYNC_EVENT_CMPL_EVENT_ID_PPS_TIMESTAMP: { + bnxt_ptp_pps_event(bp, data1, data2); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_ERROR_REPORT: { + bnxt_event_error_report(bp, data1, data2); + goto async_event_process_exit; + } + case ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE: { + u16 seq_id = le32_to_cpu(cmpl->event_data2) & 0xffff; + + hwrm_update_token(bp, seq_id, BNXT_HWRM_DEFERRED); + goto async_event_process_exit; + } default: goto async_event_process_exit; } @@ -2253,10 +2295,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) switch (cmpl_type) { case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); - if (seq_id == bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; - else - netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); + hwrm_update_token(bp, seq_id, BNXT_HWRM_COMPLETE); break; case CMPL_BASE_TYPE_HWRM_FWD_REQ: @@ -2690,7 +2729,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_len(tx_buf, len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); xdp_return_frame(tx_buf->xdpf); tx_buf->action = 0; tx_buf->xdpf = NULL; @@ -2715,7 +2754,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), skb_headlen(skb), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); last = tx_buf->nr_frags; j += 2; @@ -2727,7 +2766,7 @@ static void bnxt_free_tx_skbs(struct bnxt *bp) dma_unmap_page( &pdev->dev, dma_unmap_addr(tx_buf, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); } dev_kfree_skb(skb); } @@ -2794,7 +2833,7 @@ skip_rx_tpa_free: continue; dma_unmap_page_attrs(&pdev->dev, rx_agg_buf->mapping, - BNXT_RX_PAGE_SIZE, PCI_DMA_FROMDEVICE, + BNXT_RX_PAGE_SIZE, DMA_FROM_DEVICE, DMA_ATTR_WEAK_ORDERING); rx_agg_buf->page = NULL; @@ -3176,6 +3215,58 @@ static int bnxt_alloc_tx_rings(struct bnxt *bp) return 0; } +static void bnxt_free_cp_arrays(struct bnxt_cp_ring_info *cpr) +{ + kfree(cpr->cp_desc_ring); + cpr->cp_desc_ring = NULL; + kfree(cpr->cp_desc_mapping); + cpr->cp_desc_mapping = NULL; +} + +static int bnxt_alloc_cp_arrays(struct bnxt_cp_ring_info *cpr, int n) +{ + cpr->cp_desc_ring = kcalloc(n, sizeof(*cpr->cp_desc_ring), GFP_KERNEL); + if (!cpr->cp_desc_ring) + return -ENOMEM; + cpr->cp_desc_mapping = kcalloc(n, sizeof(*cpr->cp_desc_mapping), + GFP_KERNEL); + if (!cpr->cp_desc_mapping) + return -ENOMEM; + return 0; +} + +static void bnxt_free_all_cp_arrays(struct bnxt *bp) +{ + int i; + + if (!bp->bnapi) + return; + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + + if (!bnapi) + continue; + bnxt_free_cp_arrays(&bnapi->cp_ring); + } +} + +static int bnxt_alloc_all_cp_arrays(struct bnxt *bp) +{ + int i, n = bp->cp_nr_pages; + + for (i = 0; i < bp->cp_nr_rings; i++) { + struct bnxt_napi *bnapi = bp->bnapi[i]; + int rc; + + if (!bnapi) + continue; + rc = bnxt_alloc_cp_arrays(&bnapi->cp_ring, n); + if (rc) + return rc; + } + return 0; +} + static void bnxt_free_cp_rings(struct bnxt *bp) { int i; @@ -3203,6 
+3294,7 @@ static void bnxt_free_cp_rings(struct bnxt *bp) if (cpr2) { ring = &cpr2->cp_ring_struct; bnxt_free_ring(bp, &ring->ring_mem); + bnxt_free_cp_arrays(cpr2); kfree(cpr2); cpr->cp_ring_arr[j] = NULL; } @@ -3221,6 +3313,12 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) if (!cpr) return NULL; + rc = bnxt_alloc_cp_arrays(cpr, bp->cp_nr_pages); + if (rc) { + bnxt_free_cp_arrays(cpr); + kfree(cpr); + return NULL; + } ring = &cpr->cp_ring_struct; rmem = &ring->ring_mem; rmem->nr_pages = bp->cp_nr_pages; @@ -3231,6 +3329,7 @@ static struct bnxt_cp_ring_info *bnxt_alloc_cp_sub_ring(struct bnxt *bp) rc = bnxt_alloc_ring(bp, rmem); if (rc) { bnxt_free_ring(bp, rmem); + bnxt_free_cp_arrays(cpr); kfree(cpr); cpr = NULL; } @@ -3663,9 +3762,15 @@ void bnxt_set_ring_params(struct bnxt *bp) if (jumbo_factor > agg_factor) agg_factor = jumbo_factor; } - agg_ring_size = ring_size * agg_factor; + if (agg_factor) { + if (ring_size > BNXT_MAX_RX_DESC_CNT_JUM_ENA) { + ring_size = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + netdev_warn(bp->dev, "RX ring size reduced from %d to %d because the jumbo ring is now enabled\n", + bp->rx_ring_size, ring_size); + bp->rx_ring_size = ring_size; + } + agg_ring_size = ring_size * agg_factor; - if (agg_ring_size) { bp->rx_agg_nr_pages = bnxt_calc_nr_ring_pages(agg_ring_size, RX_DESC_CNT); if (bp->rx_agg_nr_pages > MAX_RX_AGG_PAGES) { @@ -3855,77 +3960,26 @@ out: static void bnxt_free_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_cmd_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, bp->hwrm_cmd_resp_addr, - bp->hwrm_cmd_resp_dma_addr); - bp->hwrm_cmd_resp_addr = NULL; - } - - if (bp->hwrm_cmd_kong_resp_addr) { - dma_free_coherent(&pdev->dev, PAGE_SIZE, - bp->hwrm_cmd_kong_resp_addr, - bp->hwrm_cmd_kong_resp_dma_addr); - bp->hwrm_cmd_kong_resp_addr = NULL; - } -} + struct bnxt_hwrm_wait_token *token; -static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; + dma_pool_destroy(bp->hwrm_dma_pool); + bp->hwrm_dma_pool = NULL; - if (bp->hwrm_cmd_kong_resp_addr) - return 0; - - bp->hwrm_cmd_kong_resp_addr = - dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_kong_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_kong_resp_addr) - return -ENOMEM; - - return 0; + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) + WRITE_ONCE(token->state, BNXT_HWRM_CANCELLED); + rcu_read_unlock(); } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) { - struct pci_dev *pdev = bp->pdev; - - bp->hwrm_cmd_resp_addr = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, - &bp->hwrm_cmd_resp_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_cmd_resp_addr) + bp->hwrm_dma_pool = dma_pool_create("bnxt_hwrm", &bp->pdev->dev, + BNXT_HWRM_DMA_SIZE, + BNXT_HWRM_DMA_ALIGN, 0); + if (!bp->hwrm_dma_pool) return -ENOMEM; - return 0; -} - -static void bnxt_free_hwrm_short_cmd_req(struct bnxt *bp) -{ - if (bp->hwrm_short_cmd_req_addr) { - struct pci_dev *pdev = bp->pdev; - - dma_free_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - bp->hwrm_short_cmd_req_addr, - bp->hwrm_short_cmd_req_dma_addr); - bp->hwrm_short_cmd_req_addr = NULL; - } -} - -static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) -{ - struct pci_dev *pdev = bp->pdev; - - if (bp->hwrm_short_cmd_req_addr) - return 0; - - bp->hwrm_short_cmd_req_addr = - dma_alloc_coherent(&pdev->dev, bp->hwrm_max_ext_req_len, - &bp->hwrm_short_cmd_req_dma_addr, - GFP_KERNEL); - if (!bp->hwrm_short_cmd_req_addr) - return -ENOMEM; + 
INIT_HLIST_HEAD(&bp->hwrm_pending_list); return 0; } @@ -3986,8 +4040,8 @@ static void bnxt_copy_hw_masks(u64 *mask_arr, __le64 *hw_mask_arr, int count) static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, struct bnxt_stats_mem *stats) { - struct hwrm_func_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qstats_ext_input req = {0}; + struct hwrm_func_qstats_ext_output *resp; + struct hwrm_func_qstats_ext_input *req; __le64 *hw_masks; int rc; @@ -3995,19 +4049,20 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, !(bp->flags & BNXT_FLAG_CHIP_P5)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QSTATS_EXT); if (rc) - goto qstat_exit; + return rc; - hw_masks = &resp->rx_ucast_pkts; - bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + req->fid = cpu_to_le16(0xffff); + req->flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; -qstat_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + hw_masks = &resp->rx_ucast_pkts; + bnxt_copy_hw_masks(stats->hw_masks, hw_masks, stats->len / 8); + } + hwrm_req_drop(bp, req); return rc; } @@ -4266,6 +4321,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_tx_rings(bp); bnxt_free_rx_rings(bp); bnxt_free_cp_rings(bp); + bnxt_free_all_cp_arrays(bp); bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { bnxt_free_ring_stats(bp); @@ -4386,6 +4442,10 @@ static int bnxt_alloc_mem(struct bnxt *bp, bool irq_re_init) goto alloc_mem_err; } + rc = bnxt_alloc_all_cp_arrays(bp); + if (rc) + goto alloc_mem_err; + bnxt_init_ring_struct(bp); rc = bnxt_alloc_rx_rings(bp); @@ -4468,313 +4528,38 @@ static void bnxt_enable_int(struct bnxt *bp) } } -void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, - u16 cmpl_ring, u16 target_id) -{ - struct input *req = request; - - req->req_type = cpu_to_le16(req_type); - req->cmpl_ring = cpu_to_le16(cmpl_ring); - req->target_id = cpu_to_le16(target_id); - if (bnxt_kong_hwrm_message(bp, req)) - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - else - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); -} - -static int bnxt_hwrm_to_stderr(u32 hwrm_err) -{ - switch (hwrm_err) { - case HWRM_ERR_CODE_SUCCESS: - return 0; - case HWRM_ERR_CODE_RESOURCE_LOCKED: - return -EROFS; - case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: - return -EACCES; - case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: - return -ENOSPC; - case HWRM_ERR_CODE_INVALID_PARAMS: - case HWRM_ERR_CODE_INVALID_FLAGS: - case HWRM_ERR_CODE_INVALID_ENABLES: - case HWRM_ERR_CODE_UNSUPPORTED_TLV: - case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: - return -EINVAL; - case HWRM_ERR_CODE_NO_BUFFER: - return -ENOMEM; - case HWRM_ERR_CODE_HOT_RESET_PROGRESS: - case HWRM_ERR_CODE_BUSY: - return -EAGAIN; - case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: - return -EOPNOTSUPP; - default: - return -EIO; - } -} - -static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, - int timeout, bool silent) -{ - int i, intr_process, rc, tmo_count; - struct input *req = msg; - u32 *data = msg; - u8 *valid; - u16 cp_ring_id, len = 0; - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; - struct hwrm_short_input short_input = {0}; - u32 
doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; - u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; - u16 dst = BNXT_HWRM_CHNL_CHIMP; - - if (BNXT_NO_FW_ACCESS(bp) && - le16_to_cpu(req->req_type) != HWRM_FUNC_RESET) - return -EBUSY; - - if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { - if (msg_len > bp->hwrm_max_ext_req_len || - !bp->hwrm_short_cmd_req_addr) - return -EINVAL; - } - - if (bnxt_hwrm_kong_chnl(bp, req)) { - dst = BNXT_HWRM_CHNL_KONG; - bar_offset = BNXT_GRCPF_REG_KONG_COMM; - doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; - resp = bp->hwrm_cmd_kong_resp_addr; - } - - memset(resp, 0, PAGE_SIZE); - cp_ring_id = le16_to_cpu(req->cmpl_ring); - intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; - - req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - msg_len > BNXT_HWRM_MAX_REQ_LEN) { - void *short_cmd_req = bp->hwrm_short_cmd_req_addr; - u16 max_msg_len; - - /* Set boundary for maximum extended request length for short - * cmd format. If passed up from device use the max supported - * internal req length. - */ - max_msg_len = bp->hwrm_max_ext_req_len; - - memcpy(short_cmd_req, req, msg_len); - if (msg_len < max_msg_len) - memset(short_cmd_req + msg_len, 0, - max_msg_len - msg_len); - - short_input.req_type = req->req_type; - short_input.signature = - cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); - short_input.size = cpu_to_le16(msg_len); - short_input.req_addr = - cpu_to_le64(bp->hwrm_short_cmd_req_dma_addr); - - data = (u32 *)&short_input; - msg_len = sizeof(short_input); - - /* Sync memory write before updating doorbell */ - wmb(); - - max_req_len = BNXT_HWRM_SHORT_REQ_LEN; - } - - /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); - - for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + bar_offset + i); - - /* Ring channel doorbell */ - writel(1, bp->bar0 + doorbell_offset); - - if (!pci_is_enabled(bp->pdev)) - return -ENODEV; - - if (!timeout) - timeout = DFLT_HWRM_CMD_TIMEOUT; - /* Limit timeout to an upper limit */ - timeout = min(timeout, HWRM_CMD_MAX_TIMEOUT); - /* convert timeout to usec */ - timeout *= 1000; - - i = 0; - /* Short timeout for the first few iterations: - * number of loops = number of loops for short timeout + - * number of loops for standard timeout. - */ - tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; - timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; - tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - - if (intr_process) { - u16 seq_id = bp->hwrm_intr_seq_id; - - /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != (u16)~seq_id && - i++ < tmo_count) { - /* Abort the wait for completion if the FW health - * check has failed. 
- */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) { - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - } else { - if (HWRM_WAIT_MUST_ABORT(bp, req)) - break; - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - } - - if (bp->hwrm_intr_seq_id != (u16)~seq_id) { - if (!silent) - netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", - le16_to_cpu(req->req_type)); - return -EBUSY; - } - len = le16_to_cpu(resp->resp_len); - valid = ((u8 *)resp) + len - 1; - } else { - int j; - - /* Check if response len is updated */ - for (i = 0; i < tmo_count; i++) { - /* Abort the wait for completion if the FW health - * check has failed. - */ - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) - return -EBUSY; - len = le16_to_cpu(resp->resp_len); - if (len) - break; - /* on first few passes, just barely sleep */ - if (i < HWRM_SHORT_TIMEOUT_COUNTER) { - usleep_range(HWRM_SHORT_MIN_TIMEOUT, - HWRM_SHORT_MAX_TIMEOUT); - } else { - if (HWRM_WAIT_MUST_ABORT(bp, req)) - goto timeout_abort; - usleep_range(HWRM_MIN_TIMEOUT, - HWRM_MAX_TIMEOUT); - } - } - - if (i >= tmo_count) { -timeout_abort: - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len); - return -EBUSY; - } - - /* Last byte of resp contains valid bit */ - valid = ((u8 *)resp) + len - 1; - for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { - /* make sure we read from updated DMA memory */ - dma_rmb(); - if (*valid) - break; - usleep_range(1, 5); - } - - if (j >= HWRM_VALID_BIT_DELAY_USEC) { - if (!silent) - netdev_err(bp->dev, "Error (timeout: %d) msg {0x%x 0x%x} len:%d v:%d\n", - HWRM_TOTAL_TIMEOUT(i), - le16_to_cpu(req->req_type), - le16_to_cpu(req->seq_id), len, - *valid); - return -EBUSY; - } - } - - /* Zero valid bit for compatibility. Valid bit in an older spec - * may become a new field in a newer spec. We must make sure that - * a new field not implemented by old spec will read zero. 
- */ - *valid = 0; - rc = le16_to_cpu(resp->error_code); - if (rc && !silent) - netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", - le16_to_cpu(resp->req_type), - le16_to_cpu(resp->seq_id), rc); - return bnxt_hwrm_to_stderr(rc); -} - -int _hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, false); -} - -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - return bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); -} - -int hwrm_send_message(struct bnxt *bp, void *msg, u32 msg_len, int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, msg, msg_len, timeout); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - -int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len, - int timeout) -{ - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = bnxt_hwrm_do_send_msg(bp, msg, msg_len, timeout, true); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only) { - struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_rgtr_input req = {0}; DECLARE_BITMAP(async_events_bmap, 256); u32 *events = (u32 *)async_events_bmap; + struct hwrm_func_drv_rgtr_output *resp; + struct hwrm_func_drv_rgtr_input *req; u32 flags; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_RGTR); + if (rc) + return rc; - req.enables = - cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | - FUNC_DRV_RGTR_REQ_ENABLES_VER | - FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE | + FUNC_DRV_RGTR_REQ_ENABLES_VER | + FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); + req->os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX); flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE; if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET) flags |= FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT; if (bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY) flags |= FUNC_DRV_RGTR_REQ_FLAGS_ERROR_RECOVERY_SUPPORT | FUNC_DRV_RGTR_REQ_FLAGS_MASTER_SUPPORT; - req.flags = cpu_to_le32(flags); - req.ver_maj_8b = DRV_VER_MAJ; - req.ver_min_8b = DRV_VER_MIN; - req.ver_upd_8b = DRV_VER_UPD; - req.ver_maj = cpu_to_le16(DRV_VER_MAJ); - req.ver_min = cpu_to_le16(DRV_VER_MIN); - req.ver_upd = cpu_to_le16(DRV_VER_UPD); + req->flags = cpu_to_le32(flags); + req->ver_maj_8b = DRV_VER_MAJ; + req->ver_min_8b = DRV_VER_MIN; + req->ver_upd_8b = DRV_VER_UPD; + req->ver_maj = cpu_to_le16(DRV_VER_MAJ); + req->ver_min = cpu_to_le16(DRV_VER_MIN); + req->ver_upd = cpu_to_le16(DRV_VER_UPD); if (BNXT_PF(bp)) { u32 data[8]; @@ -4791,14 +4576,14 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, } for (i = 0; i < 8; i++) - req.vf_req_fwd[i] = cpu_to_le32(data[i]); + req->vf_req_fwd[i] = cpu_to_le32(data[i]); - req.enables |= + req->enables |= cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.flags |= cpu_to_le32( + req->flags |= cpu_to_le32( FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); memset(async_events_bmap, 0, sizeof(async_events_bmap)); @@ -4817,57 +4602,63 @@ int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, } } for (i = 0; i < 8; i++) - req.async_event_fwd[i] |= cpu_to_le32(events[i]); + 
req->async_event_fwd[i] |= cpu_to_le32(events[i]); if (async_only) - req.enables = + req->enables = cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { set_bit(BNXT_STATE_DRV_REGISTERED, &bp->state); if (resp->flags & cpu_to_le32(FUNC_DRV_RGTR_RESP_FLAGS_IF_CHANGE_SUPPORTED)) bp->fw_cap |= BNXT_FW_CAP_IF_CHANGE; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_drv_unrgtr(struct bnxt *bp) { - struct hwrm_func_drv_unrgtr_input req = {0}; + struct hwrm_func_drv_unrgtr_input *req; + int rc; if (!test_and_clear_bit(BNXT_STATE_DRV_REGISTERED, &bp->state)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_UNRGTR, -1, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_UNRGTR); + if (rc) + return rc; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_free_input req = {0}; + struct hwrm_tunnel_dst_port_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_FREE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_FREE, -1, -1); - req.tunnel_type = tunnel_type; + req->tunnel_type = tunnel_type; switch (tunnel_type) { case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN: - req.tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); + req->tunnel_dst_port_id = cpu_to_le16(bp->vxlan_fw_dst_port_id); bp->vxlan_fw_dst_port_id = INVALID_HW_RING_ID; break; case TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE: - req.tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); + req->tunnel_dst_port_id = cpu_to_le16(bp->nge_fw_dst_port_id); bp->nge_fw_dst_port_id = INVALID_HW_RING_ID; break; default: break; } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) netdev_err(bp->dev, "hwrm_tunnel_dst_port_free failed. rc:%d\n", rc); @@ -4877,17 +4668,19 @@ static int bnxt_hwrm_tunnel_dst_port_free(struct bnxt *bp, u8 tunnel_type) static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, u8 tunnel_type) { - u32 rc = 0; - struct hwrm_tunnel_dst_port_alloc_input req = {0}; - struct hwrm_tunnel_dst_port_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_tunnel_dst_port_alloc_output *resp; + struct hwrm_tunnel_dst_port_alloc_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TUNNEL_DST_PORT_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_TUNNEL_DST_PORT_ALLOC); + if (rc) + return rc; - req.tunnel_type = tunnel_type; - req.tunnel_dst_port_val = port; + req->tunnel_type = tunnel_type; + req->tunnel_dst_port_val = port; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { netdev_err(bp->dev, "hwrm_tunnel_dst_port_alloc failed. 
rc:%d\n", rc); @@ -4907,33 +4700,40 @@ static int bnxt_hwrm_tunnel_dst_port_alloc(struct bnxt *bp, __be16 port, } err_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, u16 vnic_id) { - struct hwrm_cfa_l2_set_rx_mask_input req = {0}; + struct hwrm_cfa_l2_set_rx_mask_input *req; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_SET_RX_MASK, -1, -1); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_SET_RX_MASK); + if (rc) + return rc; - req.num_mc_entries = cpu_to_le32(vnic->mc_list_count); - req.mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); - req.mask = cpu_to_le32(vnic->rx_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + req->num_mc_entries = cpu_to_le32(vnic->mc_list_count); + req->mc_tbl_addr = cpu_to_le64(vnic->mc_list_mapping); + req->mask = cpu_to_le32(vnic->rx_mask); + return hwrm_req_send_silent(bp, req); } #ifdef CONFIG_RFS_ACCEL static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_free_input req = {0}; + struct hwrm_cfa_ntuple_filter_free_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_FREE, -1, -1); - req.ntuple_filter_id = fltr->filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_FREE); + if (rc) + return rc; + + req->ntuple_filter_id = fltr->filter_id; + return hwrm_req_send(bp, req); } #define BNXT_NTP_FLTR_FLAGS \ @@ -4958,69 +4758,70 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; struct hwrm_cfa_ntuple_filter_alloc_output *resp; + struct hwrm_cfa_ntuple_filter_alloc_input *req; struct flow_keys *keys = &fltr->fkeys; struct bnxt_vnic_info *vnic; u32 flags = 0; - int rc = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_CFA_NTUPLE_FILTER_ALLOC); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); - req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; + req->l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; if (bp->fw_cap & BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2) { flags = CFA_NTUPLE_FILTER_ALLOC_REQ_FLAGS_DEST_RFS_RING_IDX; - req.dst_id = cpu_to_le16(fltr->rxq); + req->dst_id = cpu_to_le16(fltr->rxq); } else { vnic = &bp->vnic_info[fltr->rxq + 1]; - req.dst_id = cpu_to_le16(vnic->fw_vnic_id); + req->dst_id = cpu_to_le16(vnic->fw_vnic_id); } - req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(BNXT_NTP_FLTR_FLAGS); - req.ethertype = htons(ETH_P_IP); - memcpy(req.src_macaddr, fltr->src_mac_addr, ETH_ALEN); - req.ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.ip_protocol = keys->basic.ip_proto; + req->ethertype = htons(ETH_P_IP); + memcpy(req->src_macaddr, fltr->src_mac_addr, ETH_ALEN); + req->ip_addr_type = CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->ip_protocol = keys->basic.ip_proto; if (keys->basic.n_proto == htons(ETH_P_IPV6)) { int i; - req.ethertype = htons(ETH_P_IPV6); - req.ip_addr_type = + req->ethertype = htons(ETH_P_IPV6); + req->ip_addr_type = 
CFA_NTUPLE_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV6; - *(struct in6_addr *)&req.src_ipaddr[0] = + *(struct in6_addr *)&req->src_ipaddr[0] = keys->addrs.v6addrs.src; - *(struct in6_addr *)&req.dst_ipaddr[0] = + *(struct in6_addr *)&req->dst_ipaddr[0] = keys->addrs.v6addrs.dst; for (i = 0; i < 4; i++) { - req.src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); - req.dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req->src_ipaddr_mask[i] = cpu_to_be32(0xffffffff); + req->dst_ipaddr_mask[i] = cpu_to_be32(0xffffffff); } } else { - req.src_ipaddr[0] = keys->addrs.v4addrs.src; - req.src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); - req.dst_ipaddr[0] = keys->addrs.v4addrs.dst; - req.dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req->src_ipaddr[0] = keys->addrs.v4addrs.src; + req->src_ipaddr_mask[0] = cpu_to_be32(0xffffffff); + req->dst_ipaddr[0] = keys->addrs.v4addrs.dst; + req->dst_ipaddr_mask[0] = cpu_to_be32(0xffffffff); } if (keys->control.flags & FLOW_DIS_ENCAPSULATION) { - req.enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); - req.tunnel_type = + req->enables |= cpu_to_le32(BNXT_NTP_TUNNEL_FLTR_FLAG); + req->tunnel_type = CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL; } - req.src_port = keys->ports.src; - req.src_port_mask = cpu_to_be16(0xffff); - req.dst_port = keys->ports.dst; - req.dst_port_mask = cpu_to_be16(0xffff); + req->src_port = keys->ports.src; + req->src_port_mask = cpu_to_be16(0xffff); + req->dst_port = keys->ports.dst; + req->dst_port_mask = cpu_to_be16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) fltr->filter_id = resp->ntuple_filter_id; - } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } #endif @@ -5028,62 +4829,62 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, static int bnxt_hwrm_set_vnic_filter(struct bnxt *bp, u16 vnic_id, u16 idx, u8 *mac_addr) { - u32 rc = 0; - struct hwrm_cfa_l2_filter_alloc_input req = {0}; - struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_l2_filter_alloc_output *resp; + struct hwrm_cfa_l2_filter_alloc_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_ALLOC, -1, -1); - req.flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_ALLOC); + if (rc) + return rc; + + req->flags = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_PATH_RX); if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) - req.flags |= + req->flags |= cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_FLAGS_OUTERMOST); - req.dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); - req.enables = + req->dst_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_vnic_id); + req->enables = cpu_to_le32(CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR | CFA_L2_FILTER_ALLOC_REQ_ENABLES_DST_ID | CFA_L2_FILTER_ALLOC_REQ_ENABLES_L2_ADDR_MASK); - memcpy(req.l2_addr, mac_addr, ETH_ALEN); - req.l2_addr_mask[0] = 0xff; - req.l2_addr_mask[1] = 0xff; - req.l2_addr_mask[2] = 0xff; - req.l2_addr_mask[3] = 0xff; - req.l2_addr_mask[4] = 0xff; - req.l2_addr_mask[5] = 0xff; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + memcpy(req->l2_addr, mac_addr, ETH_ALEN); + req->l2_addr_mask[0] = 0xff; + req->l2_addr_mask[1] = 0xff; + req->l2_addr_mask[2] = 0xff; + req->l2_addr_mask[3] = 0xff; + req->l2_addr_mask[4] = 0xff; + req->l2_addr_mask[5] = 0xff; + + resp = 
hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_l2_filter_id[idx] = resp->l2_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_clear_vnic_filter(struct bnxt *bp) { + struct hwrm_cfa_l2_filter_free_input *req; u16 i, j, num_of_vnics = 1; /* only vnic 0 supported */ - int rc = 0; + int rc; /* Any associated ntuple filters will also be cleared by firmware. */ - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + hwrm_req_hold(bp, req); for (i = 0; i < num_of_vnics; i++) { struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; for (j = 0; j < vnic->uc_filter_count; j++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, - HWRM_CFA_L2_FILTER_FREE, -1, -1); + req->l2_filter_id = vnic->fw_l2_filter_id[j]; - req.l2_filter_id = vnic->fw_l2_filter_id[j]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); } vnic->uc_filter_count = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); - + hwrm_req_drop(bp, req); return rc; } @@ -5091,12 +4892,15 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; u16 max_aggs = VNIC_TPA_CFG_REQ_MAX_AGGS_MAX; - struct hwrm_vnic_tpa_cfg_input req = {0}; + struct hwrm_vnic_tpa_cfg_input *req; + int rc; if (vnic->fw_vnic_id == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_TPA_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_TPA_CFG); + if (rc) + return rc; if (tpa_flags) { u16 mss = bp->dev->mtu - 40; @@ -5110,9 +4914,9 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) if (tpa_flags & BNXT_FLAG_GRO) flags |= VNIC_TPA_CFG_REQ_FLAGS_GRO; - req.flags = cpu_to_le32(flags); + req->flags = cpu_to_le32(flags); - req.enables = + req->enables = cpu_to_le32(VNIC_TPA_CFG_REQ_ENABLES_MAX_AGG_SEGS | VNIC_TPA_CFG_REQ_ENABLES_MAX_AGGS | VNIC_TPA_CFG_REQ_ENABLES_MIN_AGG_LEN); @@ -5136,14 +4940,14 @@ static int bnxt_hwrm_vnic_set_tpa(struct bnxt *bp, u16 vnic_id, u32 tpa_flags) } else { segs = ilog2(nsegs); } - req.max_agg_segs = cpu_to_le16(segs); - req.max_aggs = cpu_to_le16(max_aggs); + req->max_agg_segs = cpu_to_le16(segs); + req->max_aggs = cpu_to_le16(max_aggs); - req.min_agg_len = cpu_to_le32(512); + req->min_agg_len = cpu_to_le32(512); } - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static u16 bnxt_cp_ring_from_grp(struct bnxt *bp, struct bnxt_ring_struct *ring) @@ -5287,86 +5091,102 @@ static void bnxt_fill_hw_rss_tbl(struct bnxt *bp, struct bnxt_vnic_info *vnic) static int bnxt_hwrm_vnic_set_rss(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; + int rc; if ((bp->flags & BNXT_FLAG_CHIP_P5) || vnic->fw_rss_cos_lb_ctx[0] == INVALID_HW_RING_ID) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + if (set_rss) { bnxt_fill_hw_rss_tbl(bp, vnic); - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); - 
req.hash_key_tbl_addr = + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->ring_grp_tbl_addr = cpu_to_le64(vnic->rss_table_dma_addr); + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); } - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_vnic_set_rss_p5(struct bnxt *bp, u16 vnic_id, bool set_rss) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_rss_cfg_input req = {0}; + struct hwrm_vnic_rss_cfg_input *req; dma_addr_t ring_tbl_map; u32 i, nr_ctxs; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_CFG); + if (rc) + return rc; + + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); + if (!set_rss) + return hwrm_req_send(bp, req); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_CFG, -1, -1); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); - if (!set_rss) { - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - return 0; - } bnxt_fill_hw_rss_tbl(bp, vnic); - req.hash_type = cpu_to_le32(bp->rss_hash_cfg); - req.hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; - req.hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); + req->hash_type = cpu_to_le32(bp->rss_hash_cfg); + req->hash_mode_flags = VNIC_RSS_CFG_REQ_HASH_MODE_FLAGS_DEFAULT; + req->hash_key_tbl_addr = cpu_to_le64(vnic->rss_hash_key_dma_addr); ring_tbl_map = vnic->rss_table_dma_addr; nr_ctxs = bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings); - for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { - int rc; - req.ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); - req.ring_table_pair_index = i; - req.rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_hold(bp, req); + for (i = 0; i < nr_ctxs; ring_tbl_map += BNXT_RSS_TABLE_SIZE_P5, i++) { + req->ring_grp_tbl_addr = cpu_to_le64(ring_tbl_map); + req->ring_table_pair_index = i; + req->rss_ctx_idx = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[i]); + rc = hwrm_req_send(bp, req); if (rc) - return rc; + goto exit; } - return 0; + +exit: + hwrm_req_drop(bp, req); + return rc; } static int bnxt_hwrm_vnic_set_hds(struct bnxt *bp, u16 vnic_id) { struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_plcmodes_cfg_input req = {0}; + struct hwrm_vnic_plcmodes_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_PLCMODES_CFG, -1, -1); - req.flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | - VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_VNIC_PLCMODES_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_FLAGS_JUMBO_PLACEMENT | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV4 | + VNIC_PLCMODES_CFG_REQ_FLAGS_HDS_IPV6); + req->enables = cpu_to_le32(VNIC_PLCMODES_CFG_REQ_ENABLES_JUMBO_THRESH_VALID | VNIC_PLCMODES_CFG_REQ_ENABLES_HDS_THRESHOLD_VALID); /* thresholds not implemented in firmware yet */ - req.jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); - req.hds_threshold = cpu_to_le16(bp->rx_copy_thresh); - req.vnic_id = cpu_to_le32(vnic->fw_vnic_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->jumbo_thresh = cpu_to_le16(bp->rx_copy_thresh); + req->hds_threshold = cpu_to_le16(bp->rx_copy_thresh); + 
req->vnic_id = cpu_to_le32(vnic->fw_vnic_id); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_ctx_free_one(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { - struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {0}; + struct hwrm_vnic_rss_cos_lb_ctx_free_input *req; + + if (hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_FREE)) + return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_FREE, -1, -1); - req.rss_cos_lb_ctx_id = + req->rss_cos_lb_ctx_id = cpu_to_le16(bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx]); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = INVALID_HW_RING_ID; } @@ -5387,20 +5207,20 @@ static void bnxt_hwrm_vnic_ctx_free(struct bnxt *bp) static int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, u16 vnic_id, u16 ctx_idx) { + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input *req; int rc; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {0}; - struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = - bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC, -1, - -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_RSS_COS_LB_CTX_ALLOC); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->vnic_info[vnic_id].fw_rss_cos_lb_ctx[ctx_idx] = le16_to_cpu(resp->rss_cos_lb_ctx_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -5414,47 +5234,50 @@ static u32 bnxt_get_roce_vnic_mode(struct bnxt *bp) int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) { - unsigned int ring = 0, grp_idx; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; - struct hwrm_vnic_cfg_input req = {0}; + struct hwrm_vnic_cfg_input *req; + unsigned int ring = 0, grp_idx; u16 def_vlan = 0; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_VNIC_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0]; - req.default_rx_ring_id = + req->default_rx_ring_id = cpu_to_le16(rxr->rx_ring_struct.fw_ring_id); - req.default_cmpl_ring_id = + req->default_cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, rxr)); - req.enables = + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DEFAULT_RX_RING_ID | VNIC_CFG_REQ_ENABLES_DEFAULT_CMPL_RING_ID); goto vnic_mru; } - req.enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); + req->enables = cpu_to_le32(VNIC_CFG_REQ_ENABLES_DFLT_RING_GRP); /* Only RSS support for now TBD: COS & LB */ if (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID) { - req.rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + req->rss_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[0]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); } else if (vnic->flags & BNXT_VNIC_RFS_NEW_RSS_FLAG) { - req.rss_rule = + req->rss_rule = cpu_to_le16(bp->vnic_info[0].fw_rss_cos_lb_ctx[0]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_RSS_RULE | VNIC_CFG_REQ_ENABLES_MRU); - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_RSS_DFLT_CR_MODE); } else { - req.rss_rule = cpu_to_le16(0xffff); + req->rss_rule = cpu_to_le16(0xffff); } if 
(BNXT_CHIP_TYPE_NITRO_A0(bp) && (vnic->fw_rss_cos_lb_ctx[0] != INVALID_HW_RING_ID)) { - req.cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); - req.enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); + req->cos_rule = cpu_to_le16(vnic->fw_rss_cos_lb_ctx[1]); + req->enables |= cpu_to_le32(VNIC_CFG_REQ_ENABLES_COS_RULE); } else { - req.cos_rule = cpu_to_le16(0xffff); + req->cos_rule = cpu_to_le16(0xffff); } if (vnic->flags & BNXT_VNIC_RSS_FLAG) @@ -5465,34 +5288,36 @@ int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id) ring = bp->rx_nr_rings - 1; grp_idx = bp->rx_ring[ring].bnapi->index; - req.dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); - req.lb_rule = cpu_to_le16(0xffff); + req->dflt_ring_grp = cpu_to_le16(bp->grp_info[grp_idx].fw_grp_id); + req->lb_rule = cpu_to_le16(0xffff); vnic_mru: - req.mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); + req->mru = cpu_to_le16(bp->dev->mtu + ETH_HLEN + VLAN_HLEN); - req.vnic_id = cpu_to_le16(vnic->fw_vnic_id); + req->vnic_id = cpu_to_le16(vnic->fw_vnic_id); #ifdef CONFIG_BNXT_SRIOV if (BNXT_VF(bp)) def_vlan = bp->vf.vlan; #endif if ((bp->flags & BNXT_FLAG_STRIP_VLAN) || def_vlan) - req.flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); + req->flags |= cpu_to_le32(VNIC_CFG_REQ_FLAGS_VLAN_STRIP_MODE); if (!vnic_id && bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) - req.flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); + req->flags |= cpu_to_le32(bnxt_get_roce_vnic_mode(bp)); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static void bnxt_hwrm_vnic_free_one(struct bnxt *bp, u16 vnic_id) { if (bp->vnic_info[vnic_id].fw_vnic_id != INVALID_HW_RING_ID) { - struct hwrm_vnic_free_input req = {0}; + struct hwrm_vnic_free_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_FREE, -1, -1); - req.vnic_id = + if (hwrm_req_init(bp, req, HWRM_VNIC_FREE)) + return; + + req->vnic_id = cpu_to_le32(bp->vnic_info[vnic_id].fw_vnic_id); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->vnic_info[vnic_id].fw_vnic_id = INVALID_HW_RING_ID; } } @@ -5509,11 +5334,15 @@ static int bnxt_hwrm_vnic_alloc(struct bnxt *bp, u16 vnic_id, unsigned int start_rx_ring_idx, unsigned int nr_rings) { - int rc = 0; unsigned int i, j, grp_idx, end_idx = start_rx_ring_idx + nr_rings; - struct hwrm_vnic_alloc_input req = {0}; - struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_vnic_info *vnic = &bp->vnic_info[vnic_id]; + struct hwrm_vnic_alloc_output *resp; + struct hwrm_vnic_alloc_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_VNIC_ALLOC); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) goto vnic_no_ring_grps; @@ -5533,22 +5362,20 @@ vnic_no_ring_grps: for (i = 0; i < BNXT_MAX_CTX_PER_VNIC; i++) vnic->fw_rss_cos_lb_ctx[i] = INVALID_HW_RING_ID; if (vnic_id == 0) - req.flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); + req->flags = cpu_to_le32(VNIC_ALLOC_REQ_FLAGS_DEFAULT); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_ALLOC, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) vnic->fw_vnic_id = le32_to_cpu(resp->vnic_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) { - struct hwrm_vnic_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_vnic_qcaps_input req = {0}; + struct hwrm_vnic_qcaps_output 
*resp; + struct hwrm_vnic_qcaps_input *req; int rc; bp->hw_ring_stats_size = sizeof(struct ctx_hw_stats); @@ -5556,9 +5383,12 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10600) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VNIC_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_VNIC_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u32 flags = le32_to_cpu(resp->flags); @@ -5584,92 +5414,96 @@ static int bnxt_hwrm_vnic_qcaps(struct bnxt *bp) bp->hw_ring_stats_size = BNXT_RING_STATS_SIZE_P5_SR2; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp) { + struct hwrm_ring_grp_alloc_output *resp; + struct hwrm_ring_grp_alloc_input *req; + int rc; u16 i; - u32 rc = 0; if (bp->flags & BNXT_FLAG_CHIP_P5) return 0; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_RING_GRP_ALLOC); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->rx_nr_rings; i++) { - struct hwrm_ring_grp_alloc_input req = {0}; - struct hwrm_ring_grp_alloc_output *resp = - bp->hwrm_cmd_resp_addr; unsigned int grp_idx = bp->rx_ring[i].bnapi->index; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_ALLOC, -1, -1); + req->cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); + req->rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); + req->ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); + req->sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); - req.cr = cpu_to_le16(bp->grp_info[grp_idx].cp_fw_ring_id); - req.rr = cpu_to_le16(bp->grp_info[grp_idx].rx_fw_ring_id); - req.ar = cpu_to_le16(bp->grp_info[grp_idx].agg_fw_ring_id); - req.sc = cpu_to_le16(bp->grp_info[grp_idx].fw_stats_ctx); + rc = hwrm_req_send(bp, req); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); if (rc) break; bp->grp_info[grp_idx].fw_grp_id = le32_to_cpu(resp->ring_group_id); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static void bnxt_hwrm_ring_grp_free(struct bnxt *bp) { + struct hwrm_ring_grp_free_input *req; u16 i; - struct hwrm_ring_grp_free_input req = {0}; if (!bp->grp_info || (bp->flags & BNXT_FLAG_CHIP_P5)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_GRP_FREE, -1, -1); + if (hwrm_req_init(bp, req, HWRM_RING_GRP_FREE)) + return; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { if (bp->grp_info[i].fw_grp_id == INVALID_HW_RING_ID) continue; - req.ring_group_id = + req->ring_group_id = cpu_to_le32(bp->grp_info[i].fw_grp_id); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); bp->grp_info[i].fw_grp_id = INVALID_HW_RING_ID; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static int hwrm_ring_alloc_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, u32 map_index) { - int rc = 0, err = 0; - struct hwrm_ring_alloc_input req = {0}; - struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_ring_alloc_output *resp; + struct hwrm_ring_alloc_input *req; struct bnxt_ring_mem_info *rmem = &ring->ring_mem; struct bnxt_ring_grp_info *grp_info; + int rc, err = 0; u16 ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_RING_ALLOC); + if (rc) + goto exit; - req.enables = 0; + req->enables = 
0; if (rmem->nr_pages > 1) { - req.page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); + req->page_tbl_addr = cpu_to_le64(rmem->pg_tbl_map); /* Page size is in log2 units */ - req.page_size = BNXT_PAGE_SHIFT; - req.page_tbl_depth = 1; + req->page_size = BNXT_PAGE_SHIFT; + req->page_tbl_depth = 1; } else { - req.page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); + req->page_tbl_addr = cpu_to_le64(rmem->dma_arr[0]); } - req.fbo = 0; + req->fbo = 0; /* Association of ring index with doorbell index and MSIX number */ - req.logical_id = cpu_to_le16(map_index); + req->logical_id = cpu_to_le16(map_index); switch (ring_type) { case HWRM_RING_ALLOC_TX: { @@ -5677,67 +5511,67 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, txr = container_of(ring, struct bnxt_tx_ring_info, tx_ring_struct); - req.ring_type = RING_ALLOC_REQ_RING_TYPE_TX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_TX; /* Association of transmit ring with completion ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); - req.length = cpu_to_le32(bp->tx_ring_mask + 1); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.queue_id = cpu_to_le16(ring->queue_id); + req->cmpl_ring_id = cpu_to_le16(bnxt_cp_ring_for_tx(bp, txr)); + req->length = cpu_to_le32(bp->tx_ring_mask + 1); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->queue_id = cpu_to_le16(ring->queue_id); break; } case HWRM_RING_ALLOC_RX: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; - req.length = cpu_to_le32(bp->rx_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->length = cpu_to_le32(bp->rx_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { u16 flags = 0; /* Association of rx ring with stats context */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( + req->rx_buf_size = cpu_to_le16(bp->rx_buf_use_size); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); if (NET_IP_ALIGN == 2) flags = RING_ALLOC_REQ_FLAGS_RX_SOP_PAD; - req.flags = cpu_to_le16(flags); + req->flags = cpu_to_le16(flags); } break; case HWRM_RING_ALLOC_AGG: if (bp->flags & BNXT_FLAG_CHIP_P5) { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX_AGG; /* Association of agg ring with rx ring */ grp_info = &bp->grp_info[ring->grp_idx]; - req.rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); - req.rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); - req.stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); - req.enables |= cpu_to_le32( + req->rx_ring_id = cpu_to_le16(grp_info->rx_fw_ring_id); + req->rx_buf_size = cpu_to_le16(BNXT_RX_PAGE_SIZE); + req->stat_ctx_id = cpu_to_le32(grp_info->fw_stats_ctx); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_RX_RING_ID_VALID | RING_ALLOC_REQ_ENABLES_RX_BUF_SIZE_VALID); } else { - req.ring_type = RING_ALLOC_REQ_RING_TYPE_RX; + req->ring_type = RING_ALLOC_REQ_RING_TYPE_RX; } - req.length = cpu_to_le32(bp->rx_agg_ring_mask + 1); + req->length = cpu_to_le32(bp->rx_agg_ring_mask + 1); break; case HWRM_RING_ALLOC_CMPL: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_L2_CMPL; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_CHIP_P5) { /* Association of cp ring with nq */ grp_info = 
&bp->grp_info[map_index]; - req.nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); - req.cq_handle = cpu_to_le64(ring->handle); - req.enables |= cpu_to_le32( + req->nq_ring_id = cpu_to_le16(grp_info->cp_fw_ring_id); + req->cq_handle = cpu_to_le64(ring->handle); + req->enables |= cpu_to_le32( RING_ALLOC_REQ_ENABLES_NQ_RING_ID_VALID); } else if (bp->flags & BNXT_FLAG_USING_MSIX) { - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; } break; case HWRM_RING_ALLOC_NQ: - req.ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; - req.length = cpu_to_le32(bp->cp_ring_mask + 1); + req->ring_type = RING_ALLOC_REQ_RING_TYPE_NQ; + req->length = cpu_to_le32(bp->cp_ring_mask + 1); if (bp->flags & BNXT_FLAG_USING_MSIX) - req.int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; + req->int_mode = RING_ALLOC_REQ_INT_MODE_MSIX; break; default: netdev_err(bp->dev, "hwrm alloc invalid ring type %d\n", @@ -5745,12 +5579,13 @@ static int hwrm_ring_alloc_send_msg(struct bnxt *bp, return -1; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); err = le16_to_cpu(resp->error_code); ring_id = le16_to_cpu(resp->ring_id); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: if (rc || err) { netdev_err(bp->dev, "hwrm_ring_alloc type %d failed. rc:%x err:%x\n", ring_type, rc, err); @@ -5765,23 +5600,28 @@ static int bnxt_hwrm_set_async_event_cr(struct bnxt *bp, int idx) int rc; if (BNXT_PF(bp)) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ASYNC_EVENT_CR); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } else { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_ASYNC_EVENT_CR); - req.async_event_cr = cpu_to_le16(idx); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->async_event_cr = cpu_to_le16(idx); + return hwrm_req_send(bp, req); } - return rc; } static void bnxt_set_db(struct bnxt *bp, struct bnxt_db_info *db, u32 ring_type, @@ -5952,23 +5792,27 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct bnxt_ring_struct *ring, u32 ring_type, int cmpl_ring_id) { + struct hwrm_ring_free_output *resp; + struct hwrm_ring_free_input *req; + u16 error_code = 0; int rc; - struct hwrm_ring_free_input req = {0}; - struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; - u16 error_code; if (BNXT_NO_FW_ACCESS(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); - req.ring_type = ring_type; - req.ring_id = cpu_to_le16(ring->fw_ring_id); + rc = hwrm_req_init(bp, req, HWRM_RING_FREE); + if (rc) + goto exit; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - error_code = le16_to_cpu(resp->error_code); - mutex_unlock(&bp->hwrm_cmd_lock); + req->cmpl_ring = 
cpu_to_le16(cmpl_ring_id); + req->ring_type = ring_type; + req->ring_id = cpu_to_le16(ring->fw_ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + error_code = le16_to_cpu(resp->error_code); + hwrm_req_drop(bp, req); +exit: if (rc || error_code) { netdev_err(bp->dev, "hwrm_ring_free type %d failed. rc:%x err:%x\n", ring_type, rc, error_code); @@ -6083,20 +5927,23 @@ static int bnxt_trim_rings(struct bnxt *bp, int *rx, int *tx, int max, static int bnxt_hwrm_get_rings(struct bnxt *bp) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -6130,39 +5977,45 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_cp_rings = cp; hw_resc->resv_stat_ctxs = stats; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } -/* Caller must hold bp->hwrm_cmd_lock */ int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; if (bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(fid); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *tx_rings = le16_to_cpu(resp->alloc_tx_rings); + hwrm_req_drop(bp, req); return rc; } static bool bnxt_rfs_supported(struct bnxt *bp); -static void -__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, - int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int stats, int vnics) +static struct hwrm_func_cfg_input * +__bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_CFG)) + return NULL; + req->fid = cpu_to_le16(0xffff); enables |= tx_rings ? 
FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; req->num_tx_rings = cpu_to_le16(tx_rings); @@ -6203,17 +6056,19 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); + return req; } -static void -__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, - struct hwrm_func_vf_cfg_input *req, int tx_rings, - int rx_rings, int ring_grps, int cp_rings, - int stats, int vnics) +static struct hwrm_func_vf_cfg_input * +__bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, + int ring_grps, int cp_rings, int stats, int vnics) { + struct hwrm_func_vf_cfg_input *req; u32 enables = 0; - bnxt_hwrm_cmd_hdr_init(bp, req, HWRM_FUNC_VF_CFG, -1, -1); + if (hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG)) + return NULL; + enables |= tx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_TX_RINGS : 0; enables |= rx_rings ? FUNC_VF_CFG_REQ_ENABLES_NUM_RX_RINGS | FUNC_VF_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS : 0; @@ -6245,21 +6100,27 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); + return req; } static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; int rc; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - if (!req.enables) + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + if (!req->enables) { + hwrm_req_drop(bp, req); return 0; + } - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) return rc; @@ -6273,7 +6134,7 @@ static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc; if (!BNXT_NEW_RM(bp)) { @@ -6281,9 +6142,12 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; } - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); + if (!req) + return -ENOMEM; + + rc = hwrm_req_send(bp, req); if (rc) return rc; @@ -6484,14 +6348,14 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; u32 flags; if (!BNXT_NEW_RM(bp)) return 0; - __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_vf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -6501,20 +6365,19 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, if (!(bp->flags & BNXT_FLAG_CHIP_P5)) flags |= FUNC_VF_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; - req.flags = cpu_to_le32(flags); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_pf_rings(struct bnxt 
*bp, int tx_rings, int rx_rings, int ring_grps, int cp_rings, int stats, int vnics) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; u32 flags; - __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, stats, vnics); + req = __bnxt_hwrm_reserve_pf_rings(bp, tx_rings, rx_rings, ring_grps, + cp_rings, stats, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -6528,9 +6391,8 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, flags |= FUNC_CFG_REQ_FLAGS_RING_GRP_ASSETS_TEST; } - req.flags = cpu_to_le32(flags); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send_silent(bp, req); } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, @@ -6551,9 +6413,9 @@ static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) { - struct hwrm_ring_aggint_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; - struct hwrm_ring_aggint_qcaps_input req = {0}; + struct hwrm_ring_aggint_qcaps_output *resp; + struct hwrm_ring_aggint_qcaps_input *req; int rc; coal_cap->cmpl_params = BNXT_LEGACY_COAL_CMPL_PARAMS; @@ -6569,9 +6431,11 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10902) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_AGGINT_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (hwrm_req_init(bp, req, HWRM_RING_AGGINT_QCAPS)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { coal_cap->cmpl_params = le32_to_cpu(resp->cmpl_params); coal_cap->nq_params = le32_to_cpu(resp->nq_params); @@ -6591,7 +6455,7 @@ static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) le16_to_cpu(resp->num_cmpl_aggr_int_max); coal_cap->timer_units = le16_to_cpu(resp->timer_units); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static u16 bnxt_usec_to_coal_tmr(struct bnxt *bp, u16 usec) @@ -6659,37 +6523,40 @@ static void bnxt_hwrm_set_coal_params(struct bnxt *bp, req->enables |= cpu_to_le16(BNXT_COAL_CMPL_ENABLES); } -/* Caller holds bp->hwrm_cmd_lock */ static int __bnxt_hwrm_set_coal_nq(struct bnxt *bp, struct bnxt_napi *bnapi, struct bnxt_coal *hw_coal) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal_cap *coal_cap = &bp->coal_cap; u32 nq_params = coal_cap->nq_params; u16 tmr; + int rc; if (!(nq_params & RING_AGGINT_QCAPS_RESP_NQ_PARAMS_INT_LAT_TMR_MIN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, - -1, -1); - req.ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); - req.flags = + rc = hwrm_req_init(bp, req, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + req->ring_id = cpu_to_le16(cpr->cp_ring_struct.fw_ring_id); + req->flags = cpu_to_le16(RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_IS_NQ); tmr = bnxt_usec_to_coal_tmr(bp, hw_coal->coal_ticks) / 2; tmr = clamp_t(u16, tmr, 1, coal_cap->int_lat_tmr_min_max); - req.int_lat_tmr_min = cpu_to_le16(tmr); - req.enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); - return _hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + req->int_lat_tmr_min = cpu_to_le16(tmr); + req->enables |= cpu_to_le16(BNXT_COAL_CMPL_MIN_TMR_ENABLE); + return hwrm_req_send(bp, req); } int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) { - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; struct bnxt_coal coal; + int rc; /* Tick values in micro seconds. * 1 coal_buf x bufs_per_record = 1 completion record. @@ -6702,48 +6569,53 @@ int bnxt_hwrm_set_ring_coal(struct bnxt *bp, struct bnxt_napi *bnapi) if (!bnapi->rx_ring) return -ENODEV; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; - bnxt_hwrm_set_coal_params(bp, &coal, &req_rx); + bnxt_hwrm_set_coal_params(bp, &coal, req_rx); - req_rx.ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); + req_rx->ring_id = cpu_to_le16(bnxt_cp_ring_for_rx(bp, bnapi->rx_ring)); - return hwrm_send_message(bp, &req_rx, sizeof(req_rx), - HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req_rx); } int bnxt_hwrm_set_coal(struct bnxt *bp) { - int i, rc = 0; - struct hwrm_ring_cmpl_ring_cfg_aggint_params_input req_rx = {0}, - req_tx = {0}, *req; + struct hwrm_ring_cmpl_ring_cfg_aggint_params_input *req_rx, *req_tx, + *req; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req_rx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req_tx, - HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS, -1, -1); + rc = hwrm_req_init(bp, req_rx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) + return rc; + + rc = hwrm_req_init(bp, req_tx, HWRM_RING_CMPL_RING_CFG_AGGINT_PARAMS); + if (rc) { + hwrm_req_drop(bp, req_rx); + return rc; + } - bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, &req_rx); - bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, &req_tx); + bnxt_hwrm_set_coal_params(bp, &bp->rx_coal, req_rx); + bnxt_hwrm_set_coal_params(bp, &bp->tx_coal, req_tx); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req_rx); + hwrm_req_hold(bp, req_tx); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_coal *hw_coal; u16 ring_id; - req = &req_rx; + req = req_rx; if (!bnapi->rx_ring) { ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); - req = &req_tx; + req = req_tx; } else { ring_id = bnxt_cp_ring_for_rx(bp, bnapi->rx_ring); } req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6751,11 +6623,10 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) continue; if (bnapi->rx_ring && bnapi->tx_ring) { - req = &req_tx; + req = req_tx; ring_id = bnxt_cp_ring_for_tx(bp, bnapi->tx_ring); req->ring_id = cpu_to_le16(ring_id); - rc = _hwrm_send_message(bp, req, sizeof(*req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; } @@ -6765,14 +6636,15 @@ int bnxt_hwrm_set_coal(struct bnxt *bp) hw_coal = &bp->tx_coal; __bnxt_hwrm_set_coal_nq(bp, bnapi, hw_coal); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_rx); + hwrm_req_drop(bp, req_tx); return rc; } static void bnxt_hwrm_stat_ctx_free(struct bnxt *bp) { - struct hwrm_stat_ctx_clr_stats_input req0 = {0}; - struct hwrm_stat_ctx_free_input req = {0}; + struct hwrm_stat_ctx_clr_stats_input *req0 = NULL; + struct hwrm_stat_ctx_free_input *req; int i; if (!bp->bnapi) @@ -6781,53 +6653,60 @@ static void 
bnxt_hwrm_stat_ctx_free(struct bnxt *bp) if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req0, HWRM_STAT_CTX_CLR_STATS, -1, -1); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_FREE, -1, -1); - - mutex_lock(&bp->hwrm_cmd_lock); + if (hwrm_req_init(bp, req, HWRM_STAT_CTX_FREE)) + return; + if (BNXT_FW_MAJ(bp) <= 20) { + if (hwrm_req_init(bp, req0, HWRM_STAT_CTX_CLR_STATS)) { + hwrm_req_drop(bp, req); + return; + } + hwrm_req_hold(bp, req0); + } + hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; if (cpr->hw_stats_ctx_id != INVALID_STATS_CTX_ID) { - req.stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); - if (BNXT_FW_MAJ(bp) <= 20) { - req0.stat_ctx_id = req.stat_ctx_id; - _hwrm_send_message(bp, &req0, sizeof(req0), - HWRM_CMD_TIMEOUT); + req->stat_ctx_id = cpu_to_le32(cpr->hw_stats_ctx_id); + if (req0) { + req0->stat_ctx_id = req->stat_ctx_id; + hwrm_req_send(bp, req0); } - _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + hwrm_req_send(bp, req); cpr->hw_stats_ctx_id = INVALID_STATS_CTX_ID; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); + if (req0) + hwrm_req_drop(bp, req0); } static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) { - int rc = 0, i; - struct hwrm_stat_ctx_alloc_input req = {0}; - struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_stat_ctx_alloc_output *resp; + struct hwrm_stat_ctx_alloc_input *req; + int rc, i; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_STAT_CTX_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_STAT_CTX_ALLOC); + if (rc) + return rc; - req.stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); - req.update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); + req->stats_dma_length = cpu_to_le16(bp->hw_ring_stats_size); + req->update_period_ms = cpu_to_le32(bp->stats_coal_ticks / 1000); - mutex_lock(&bp->hwrm_cmd_lock); + resp = hwrm_req_hold(bp, req); for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; - req.stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); + req->stats_dma_addr = cpu_to_le64(cpr->stats.hw_stats_map); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc) break; @@ -6835,22 +6714,25 @@ static int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp) bp->grp_info[i].fw_stats_ctx = cpr->hw_stats_ctx_id; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_func_qcfg(struct bnxt *bp) { - struct hwrm_func_qcfg_input req = {0}; - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u32 min_db_offset = 0; u16 flags; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto func_qcfg_exit; @@ -6910,7 +6792,7 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) bp->db_size = pci_resource_len(bp->pdev, 2); func_qcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -6949,17 +6831,19 @@ static void 
bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx, static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) { - struct hwrm_func_backing_store_qcaps_input req = {0}; - struct hwrm_func_backing_store_qcaps_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_func_backing_store_qcaps_output *resp; + struct hwrm_func_backing_store_qcaps_input *req; int rc; if (bp->hwrm_spec_code < 0x10902 || BNXT_VF(bp) || bp->ctx) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BACKING_STORE_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; @@ -7024,7 +6908,7 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp) rc = 0; } ctx_err: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7055,15 +6939,17 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) { - struct hwrm_func_backing_store_cfg_input req = {0}; + struct hwrm_func_backing_store_cfg_input *req; struct bnxt_ctx_mem_info *ctx = bp->ctx; struct bnxt_ctx_pg_info *ctx_pg; - u32 req_len = sizeof(req); + void **__req = (void **)&req; + u32 req_len = sizeof(*req); __le32 *num_entries; __le64 *pg_dir; u32 flags = 0; u8 *pg_attr; u32 ena; + int rc; int i; if (!ctx) @@ -7071,90 +6957,93 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) if (req_len > bp->hwrm_max_ext_req_len) req_len = BNXT_BACKING_STORE_CFG_LEGACY_LEN; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BACKING_STORE_CFG, -1, -1); - req.enables = cpu_to_le32(enables); + rc = __hwrm_req_init(bp, __req, HWRM_FUNC_BACKING_STORE_CFG, req_len); + if (rc) + return rc; + req->enables = cpu_to_le32(enables); if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_QP) { ctx_pg = &ctx->qp_mem; - req.qp_num_entries = cpu_to_le32(ctx_pg->entries); - req.qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); - req.qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); - req.qp_entry_size = cpu_to_le16(ctx->qp_entry_size); + req->qp_num_entries = cpu_to_le32(ctx_pg->entries); + req->qp_num_qp1_entries = cpu_to_le16(ctx->qp_min_qp1_entries); + req->qp_num_l2_entries = cpu_to_le16(ctx->qp_max_l2_entries); + req->qp_entry_size = cpu_to_le16(ctx->qp_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.qpc_pg_size_qpc_lvl, - &req.qpc_page_dir); + &req->qpc_pg_size_qpc_lvl, + &req->qpc_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_SRQ) { ctx_pg = &ctx->srq_mem; - req.srq_num_entries = cpu_to_le32(ctx_pg->entries); - req.srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); - req.srq_entry_size = cpu_to_le16(ctx->srq_entry_size); + req->srq_num_entries = cpu_to_le32(ctx_pg->entries); + req->srq_num_l2_entries = cpu_to_le16(ctx->srq_max_l2_entries); + req->srq_entry_size = cpu_to_le16(ctx->srq_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.srq_pg_size_srq_lvl, - &req.srq_page_dir); + &req->srq_pg_size_srq_lvl, + &req->srq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_CQ) { ctx_pg = &ctx->cq_mem; - req.cq_num_entries = cpu_to_le32(ctx_pg->entries); - req.cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); - req.cq_entry_size = cpu_to_le16(ctx->cq_entry_size); - 
bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, &req.cq_pg_size_cq_lvl, - &req.cq_page_dir); + req->cq_num_entries = cpu_to_le32(ctx_pg->entries); + req->cq_num_l2_entries = cpu_to_le16(ctx->cq_max_l2_entries); + req->cq_entry_size = cpu_to_le16(ctx->cq_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req->cq_pg_size_cq_lvl, + &req->cq_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_VNIC) { ctx_pg = &ctx->vnic_mem; - req.vnic_num_vnic_entries = + req->vnic_num_vnic_entries = cpu_to_le16(ctx->vnic_max_vnic_entries); - req.vnic_num_ring_table_entries = + req->vnic_num_ring_table_entries = cpu_to_le16(ctx->vnic_max_ring_table_entries); - req.vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); + req->vnic_entry_size = cpu_to_le16(ctx->vnic_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.vnic_pg_size_vnic_lvl, - &req.vnic_page_dir); + &req->vnic_pg_size_vnic_lvl, + &req->vnic_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_STAT) { ctx_pg = &ctx->stat_mem; - req.stat_num_entries = cpu_to_le32(ctx->stat_max_entries); - req.stat_entry_size = cpu_to_le16(ctx->stat_entry_size); + req->stat_num_entries = cpu_to_le32(ctx->stat_max_entries); + req->stat_entry_size = cpu_to_le16(ctx->stat_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.stat_pg_size_stat_lvl, - &req.stat_page_dir); + &req->stat_pg_size_stat_lvl, + &req->stat_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { ctx_pg = &ctx->mrav_mem; - req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + req->mrav_num_entries = cpu_to_le32(ctx_pg->entries); if (ctx->mrav_num_entries_units) flags |= FUNC_BACKING_STORE_CFG_REQ_FLAGS_MRAV_RESERVATION_SPLIT; - req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + req->mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.mrav_pg_size_mrav_lvl, - &req.mrav_page_dir); + &req->mrav_pg_size_mrav_lvl, + &req->mrav_page_dir); } if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { ctx_pg = &ctx->tim_mem; - req.tim_num_entries = cpu_to_le32(ctx_pg->entries); - req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + req->tim_num_entries = cpu_to_le32(ctx_pg->entries); + req->tim_entry_size = cpu_to_le16(ctx->tim_entry_size); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, - &req.tim_pg_size_tim_lvl, - &req.tim_page_dir); + &req->tim_pg_size_tim_lvl, + &req->tim_page_dir); } - for (i = 0, num_entries = &req.tqm_sp_num_entries, - pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, - pg_dir = &req.tqm_sp_page_dir, + for (i = 0, num_entries = &req->tqm_sp_num_entries, + pg_attr = &req->tqm_sp_pg_size_tqm_sp_lvl, + pg_dir = &req->tqm_sp_page_dir, ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP; i < BNXT_MAX_TQM_RINGS; i++, num_entries++, pg_attr++, pg_dir++, ena <<= 1) { if (!(enables & ena)) continue; - req.tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); + req->tqm_entry_size = cpu_to_le16(ctx->tqm_entry_size); ctx_pg = ctx->tqm_mem[i]; *num_entries = cpu_to_le32(ctx_pg->entries); bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, pg_attr, pg_dir); } - req.flags = cpu_to_le32(flags); - return hwrm_send_message(bp, &req, req_len, HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(flags); + return hwrm_req_send(bp, req); } static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, @@ -7434,17 +7323,18 @@ skip_rdma: int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) { - struct hwrm_func_resource_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_resource_qcaps_input req = {0}; + struct 
hwrm_func_resource_qcaps_output *resp; + struct hwrm_func_resource_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESOURCE_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESOURCE_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc) goto hwrm_func_resc_qcaps_exit; @@ -7485,15 +7375,14 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) pf->vf_resv_strategy = BNXT_VF_RESV_STRATEGY_MAXIMAL; } hwrm_func_resc_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -/* bp->hwrm_cmd_lock already held. */ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) { - struct hwrm_port_mac_ptp_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_mac_ptp_qcfg_input req = {0}; + struct hwrm_port_mac_ptp_qcfg_output *resp; + struct hwrm_port_mac_ptp_qcfg_input *req; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u8 flags; int rc; @@ -7503,21 +7392,27 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) goto no_ptp; } - req.port_id = cpu_to_le16(bp->pf.port_id); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_PTP_QCFG, -1, -1); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_PTP_QCFG); if (rc) goto no_ptp; + req->port_id = cpu_to_le16(bp->pf.port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc) + goto exit; + flags = resp->flags; if (!(flags & PORT_MAC_PTP_QCFG_RESP_FLAGS_HWRM_ACCESS)) { rc = -ENODEV; - goto no_ptp; + goto exit; } if (!ptp) { ptp = kzalloc(sizeof(*ptp), GFP_KERNEL); - if (!ptp) - return -ENOMEM; + if (!ptp) { + rc = -ENOMEM; + goto exit; + } ptp->bp = bp; bp->ptp_cfg = ptp; } @@ -7529,11 +7424,18 @@ static int __bnxt_hwrm_ptp_qcfg(struct bnxt *bp) ptp->refclk_regs[1] = BNXT_TS_REG_TIMESYNC_TS0_UPPER; } else { rc = -ENODEV; - goto no_ptp; + goto exit; } - return 0; + rc = bnxt_ptp_init(bp); + if (rc) + netdev_warn(bp->dev, "PTP initialization failed.\n"); +exit: + hwrm_req_drop(bp, req); + if (!rc) + return 0; no_ptp: + bnxt_ptp_clear(bp); kfree(ptp); bp->ptp_cfg = NULL; return rc; @@ -7541,17 +7443,19 @@ no_ptp: static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u32 flags, flags_ext; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_func_qcaps_exit; @@ -7576,6 +7480,8 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) flags_ext = le32_to_cpu(resp->flags_ext); if (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_EXT_HW_STATS_SUPPORTED) bp->fw_cap |= BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED; + if (BNXT_PF(bp) && (flags_ext & FUNC_QCAPS_RESP_FLAGS_EXT_PTP_PPS_SUPPORTED)) + bp->fw_cap |= BNXT_FW_CAP_PTP_PPS; bp->tx_push_thresh = 0; if ((flags & FUNC_QCAPS_RESP_FLAGS_PUSH_MODE_SUPPORTED) && @@ 
-7613,6 +7519,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { __bnxt_hwrm_ptp_qcfg(bp); } else { + bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); bp->ptp_cfg = NULL; } @@ -7626,7 +7533,7 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) } hwrm_func_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7657,19 +7564,20 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp) static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) { - struct hwrm_cfa_adv_flow_mgnt_qcaps_input req = {0}; struct hwrm_cfa_adv_flow_mgnt_qcaps_output *resp; - int rc = 0; + struct hwrm_cfa_adv_flow_mgnt_qcaps_input *req; u32 flags; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_CFA_ADV_FLOW)) return 0; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_ADV_FLOW_MGNT_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_cfa_adv_qcaps_exit; @@ -7679,7 +7587,7 @@ static int bnxt_hwrm_cfa_adv_flow_mgnt_qcaps(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_RFS_RING_TBL_IDX_V2; hwrm_cfa_adv_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -7822,17 +7730,20 @@ static int bnxt_map_fw_health_regs(struct bnxt *bp) static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) { - struct hwrm_error_recovery_qcfg_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_error_recovery_qcfg_input req = {0}; + struct hwrm_error_recovery_qcfg_output *resp; + struct hwrm_error_recovery_qcfg_input *req; int rc, i; if (!(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_ERROR_RECOVERY_QCFG, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_ERROR_RECOVERY_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto err_recovery_out; fw_health->flags = le32_to_cpu(resp->flags); @@ -7874,7 +7785,7 @@ static int bnxt_hwrm_error_recovery_qcfg(struct bnxt *bp) resp->delay_after_reset[i]; } err_recovery_out: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!rc) rc = bnxt_map_fw_health_regs(bp); if (rc) @@ -7884,12 +7795,16 @@ err_recovery_out: static int bnxt_hwrm_func_reset(struct bnxt *bp) { - struct hwrm_func_reset_input req = {0}; + struct hwrm_func_reset_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_RESET, -1, -1); - req.enables = 0; + rc = hwrm_req_init(bp, req, HWRM_FUNC_RESET); + if (rc) + return rc; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_RESET_TIMEOUT); + req->enables = 0; + hwrm_req_timeout(bp, req, HWRM_RESET_TIMEOUT); + return hwrm_req_send(bp, req); } static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) @@ -7904,16 +7819,18 @@ static void bnxt_nvm_cfg_ver_get(struct bnxt *bp) static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) { - int rc = 0; - struct hwrm_queue_qportcfg_input req = {0}; - struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_qportcfg_output *resp; + struct hwrm_queue_qportcfg_input *req; u8 i, j, *qptr; bool no_rdma; + int rc = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_QPORTCFG, -1, -1); + rc = hwrm_req_init(bp, req, 
HWRM_QUEUE_QPORTCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto qportcfg_exit; @@ -7947,35 +7864,48 @@ static int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) bp->max_lltc = bp->max_tc; qportcfg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } -static int __bnxt_hwrm_ver_get(struct bnxt *bp, bool silent) +static int bnxt_hwrm_poll(struct bnxt *bp) { - struct hwrm_ver_get_input req = {0}; + struct hwrm_ver_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_VER_GET, -1, -1); - req.hwrm_intf_maj = HWRM_VERSION_MAJOR; - req.hwrm_intf_min = HWRM_VERSION_MINOR; - req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; - rc = bnxt_hwrm_do_send_msg(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT, - silent); + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT); + rc = hwrm_req_send(bp, req); return rc; } static int bnxt_hwrm_ver_get(struct bnxt *bp) { - struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_ver_get_output *resp; + struct hwrm_ver_get_input *req; u16 fw_maj, fw_min, fw_bld, fw_rsv; u32 dev_caps_cfg, hwrm_ver; int rc, len; + rc = hwrm_req_init(bp, req, HWRM_VER_GET); + if (rc) + return rc; + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); bp->hwrm_max_req_len = HWRM_MAX_REQ_LEN; - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_hwrm_ver_get(bp, false); + req->hwrm_intf_maj = HWRM_VERSION_MAJOR; + req->hwrm_intf_min = HWRM_VERSION_MINOR; + req->hwrm_intf_upd = HWRM_VERSION_UPDATE; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_ver_get_exit; @@ -8067,29 +7997,33 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) bp->fw_cap |= BNXT_FW_CAP_CFA_ADV_FLOW; hwrm_ver_get_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_fw_set_time(struct bnxt *bp) { - struct hwrm_fw_set_time_input req = {0}; + struct hwrm_fw_set_time_input *req; struct tm tm; time64_t now = ktime_get_real_seconds(); + int rc; if ((BNXT_VF(bp) && bp->hwrm_spec_code < 0x10901) || bp->hwrm_spec_code < 0x10400) return -EOPNOTSUPP; time64_to_tm(now, 0, &tm); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_SET_TIME, -1, -1); - req.year = cpu_to_le16(1900 + tm.tm_year); - req.month = 1 + tm.tm_mon; - req.day = tm.tm_mday; - req.hour = tm.tm_hour; - req.minute = tm.tm_min; - req.second = tm.tm_sec; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FW_SET_TIME); + if (rc) + return rc; + + req->year = cpu_to_le16(1900 + tm.tm_year); + req->month = 1 + tm.tm_mon; + req->day = tm.tm_mday; + req->hour = tm.tm_hour; + req->minute = tm.tm_min; + req->second = tm.tm_sec; + return hwrm_req_send(bp, req); } static void bnxt_add_one_ctr(u64 hw, u64 *sw, u64 mask) @@ -8177,8 +8111,9 @@ static void bnxt_accumulate_all_stats(struct bnxt *bp) static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) { + struct hwrm_port_qstats_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_port_qstats_input req = {0}; + int rc; if (!(bp->flags & BNXT_FLAG_PORT_STATS)) return 0; @@ -8186,20 +8121,24 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp, u8 flags) if (flags && !(bp->fw_cap & 
BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP; - req.flags = flags; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + + rc = hwrm_req_init(bp, req, HWRM_PORT_QSTATS); + if (rc) + return rc; + + req->flags = flags; + req->port_id = cpu_to_le16(pf->port_id); + req->tx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map + BNXT_TX_PORT_STATS_BYTE_OFFSET); - req.rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->rx_stat_host_addr = cpu_to_le64(bp->port_stats.hw_stats_map); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) { - struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; - struct hwrm_port_qstats_ext_input req = {0}; + struct hwrm_queue_pri2cos_qcfg_output *resp_qc; + struct hwrm_queue_pri2cos_qcfg_input *req_qc; + struct hwrm_port_qstats_ext_output *resp_qs; + struct hwrm_port_qstats_ext_input *req_qs; struct bnxt_pf_info *pf = &bp->pf; u32 tx_stat_size; int rc; @@ -8210,46 +8149,53 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) if (flags && !(bp->fw_cap & BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_QSTATS_EXT, -1, -1); - req.flags = flags; - req.port_id = cpu_to_le16(pf->port_id); - req.rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); - req.rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); + rc = hwrm_req_init(bp, req_qs, HWRM_PORT_QSTATS_EXT); + if (rc) + return rc; + + req_qs->flags = flags; + req_qs->port_id = cpu_to_le16(pf->port_id); + req_qs->rx_stat_size = cpu_to_le16(sizeof(struct rx_port_stats_ext)); + req_qs->rx_stat_host_addr = cpu_to_le64(bp->rx_port_stats_ext.hw_stats_map); tx_stat_size = bp->tx_port_stats_ext.hw_stats ? sizeof(struct tx_port_stats_ext) : 0; - req.tx_stat_size = cpu_to_le16(tx_stat_size); - req.tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req_qs->tx_stat_size = cpu_to_le16(tx_stat_size); + req_qs->tx_stat_host_addr = cpu_to_le64(bp->tx_port_stats_ext.hw_stats_map); + resp_qs = hwrm_req_hold(bp, req_qs); + rc = hwrm_req_send(bp, req_qs); if (!rc) { - bp->fw_rx_stats_ext_size = le16_to_cpu(resp->rx_stat_size) / 8; + bp->fw_rx_stats_ext_size = + le16_to_cpu(resp_qs->rx_stat_size) / 8; bp->fw_tx_stats_ext_size = tx_stat_size ? 
- le16_to_cpu(resp->tx_stat_size) / 8 : 0; + le16_to_cpu(resp_qs->tx_stat_size) / 8 : 0; } else { bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + hwrm_req_drop(bp, req_qs); + if (flags) - goto qstats_done; + return rc; if (bp->fw_tx_stats_ext_size <= offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { - mutex_unlock(&bp->hwrm_cmd_lock); bp->pri2cos_valid = 0; return rc; } - bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req_qc, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; + + req_qc->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); - rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + resp_qc = hwrm_req_hold(bp, req_qc); + rc = hwrm_req_send(bp, req_qc); if (!rc) { - struct hwrm_queue_pri2cos_qcfg_output *resp2; u8 *pri2cos; int i, j; - resp2 = bp->hwrm_cmd_resp_addr; - pri2cos = &resp2->pri0_cos_queue_id; + pri2cos = &resp_qc->pri0_cos_queue_id; for (i = 0; i < 8; i++) { u8 queue_id = pri2cos[i]; u8 queue_idx; @@ -8258,17 +8204,18 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp, u8 flags) queue_idx = queue_id % 10; if (queue_idx > BNXT_MAX_QUEUE) { bp->pri2cos_valid = false; - goto qstats_done; + hwrm_req_drop(bp, req_qc); + return rc; } for (j = 0; j < bp->max_q; j++) { if (bp->q_ids[j] == queue_id) bp->pri2cos_idx[i] = queue_idx; } } - bp->pri2cos_valid = 1; + bp->pri2cos_valid = true; } -qstats_done: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req_qc); + return rc; } @@ -8343,35 +8290,46 @@ static void bnxt_hwrm_resource_free(struct bnxt *bp, bool close_path, static int bnxt_hwrm_set_br_mode(struct bnxt *bp, u16 br_mode) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + u8 evb_mode; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); if (br_mode == BRIDGE_MODE_VEB) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEB; else if (br_mode == BRIDGE_MODE_VEPA) - req.evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; + evb_mode = FUNC_CFG_REQ_EVB_MODE_VEPA; else return -EINVAL; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_EVB_MODE); + req->evb_mode = evb_mode; + return hwrm_req_send(bp, req); } static int bnxt_hwrm_set_cache_line_size(struct bnxt *bp, int size) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + int rc; if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10803) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(0xffff); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(0xffff); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_CACHE_LINESIZE); + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_64; if (size == 128) - req.options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; + req->options = FUNC_CFG_REQ_OPTIONS_CACHE_LINESIZE_SIZE_128; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int __bnxt_setup_vnic(struct bnxt *bp, u16 vnic_id) @@ -9319,18 +9277,20 @@ 
static bool bnxt_phy_qcaps_no_speed(struct hwrm_port_phy_qcaps_output *resp) static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) { - int rc = 0; - struct hwrm_port_phy_qcaps_input req = {0}; - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_link_info *link_info = &bp->link_info; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; + int rc = 0; if (bp->hwrm_spec_code < 0x10201) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) goto hwrm_phy_qcaps_exit; @@ -9368,7 +9328,7 @@ static int bnxt_hwrm_phy_qcaps(struct bnxt *bp) bp->port_count = resp->port_cnt; hwrm_phy_qcaps_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -9381,19 +9341,21 @@ static bool bnxt_support_dropped(u16 advertising, u16 supported) int bnxt_update_link(struct bnxt *bp, bool chng_link_state) { - int rc = 0; struct bnxt_link_info *link_info = &bp->link_info; - struct hwrm_port_phy_qcfg_input req = {0}; - struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_qcfg_output *resp; + struct hwrm_port_phy_qcfg_input *req; u8 link_up = link_info->link_up; bool support_changed = false; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -9488,7 +9450,7 @@ int bnxt_update_link(struct bnxt *bp, bool chng_link_state) /* alwasy link down if not require to update link state */ link_info->link_up = 0; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (!BNXT_PHY_CFG_ABLE(bp)) return 0; @@ -9598,18 +9560,20 @@ static void bnxt_hwrm_set_link_common(struct bnxt *bp, struct hwrm_port_phy_cfg_ int bnxt_hwrm_set_pause(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - bnxt_hwrm_set_pause_common(bp, &req); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + bnxt_hwrm_set_pause_common(bp, req); if ((bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL) || bp->link_info.force_link_chng) - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc && !(bp->link_info.autoneg & BNXT_AUTONEG_FLOW_CTRL)) { /* since changing of pause setting doesn't trigger any link * change event, the driver needs to update the current pause @@ -9622,7 +9586,6 @@ int bnxt_hwrm_set_pause(struct bnxt *bp) bnxt_report_link(bp); } bp->link_info.force_link_chng = false; - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -9651,22 +9614,27 @@ static void bnxt_hwrm_set_eee(struct bnxt *bp, int bnxt_hwrm_set_link_setting(struct bnxt *bp, bool set_pause, bool set_eee) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return 
rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); if (set_pause) - bnxt_hwrm_set_pause_common(bp, &req); + bnxt_hwrm_set_pause_common(bp, req); - bnxt_hwrm_set_link_common(bp, &req); + bnxt_hwrm_set_link_common(bp, req); if (set_eee) - bnxt_hwrm_set_eee(bp, &req); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + bnxt_hwrm_set_eee(bp, req); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_shutdown_link(struct bnxt *bp) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; if (!BNXT_SINGLE_PF(bp)) return 0; @@ -9675,9 +9643,12 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp) !(bp->phy_flags & BNXT_PHY_FL_FW_MANAGED_LKDN)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE_LINK_DWN); + return hwrm_req_send(bp, req); } static int bnxt_fw_init_one(struct bnxt *bp); @@ -9703,16 +9674,14 @@ static int bnxt_try_recover_fw(struct bnxt *bp) int retry = 0, rc; u32 sts; - mutex_lock(&bp->hwrm_cmd_lock); do { sts = bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG); - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (!BNXT_FW_IS_BOOTING(sts) && !BNXT_FW_IS_RECOVERING(sts)) break; retry++; } while (rc == -EBUSY && retry < BNXT_FW_RETRY); - mutex_unlock(&bp->hwrm_cmd_lock); if (!BNXT_FW_IS_HEALTHY(sts)) { netdev_err(bp->dev, @@ -9732,8 +9701,8 @@ static int bnxt_try_recover_fw(struct bnxt *bp) static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) { - struct hwrm_func_drv_if_change_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_drv_if_change_input req = {0}; + struct hwrm_func_drv_if_change_output *resp; + struct hwrm_func_drv_if_change_input *req; bool fw_reset = !bp->irq_tbl; bool resc_reinit = false; int rc, retry = 0; @@ -9742,29 +9711,34 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) if (!(bp->fw_cap & BNXT_FW_CAP_IF_CHANGE)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_IF_CHANGE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_DRV_IF_CHANGE); + if (rc) + return rc; + if (up) - req.flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); - mutex_lock(&bp->hwrm_cmd_lock); + req->flags = cpu_to_le32(FUNC_DRV_IF_CHANGE_REQ_FLAGS_UP); + resp = hwrm_req_hold(bp, req); + + hwrm_req_flags(bp, req, BNXT_HWRM_FULL_WAIT); while (retry < BNXT_FW_IF_RETRY) { - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc != -EAGAIN) break; msleep(50); retry++; } - if (!rc) - flags = le32_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - if (rc == -EAGAIN) + if (rc == -EAGAIN) { + hwrm_req_drop(bp, req); return rc; - if (rc && up) { + } else if (!rc) { + flags = le32_to_cpu(resp->flags); + } else if (up) { rc = bnxt_try_recover_fw(bp); fw_reset = true; } + hwrm_req_drop(bp, req); if (rc) return rc; @@ -9833,8 +9807,8 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) { - struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_led_qcaps_input req = {0}; + struct hwrm_port_led_qcaps_output *resp; + struct hwrm_port_led_qcaps_input *req; struct bnxt_pf_info *pf = &bp->pf; int rc; @@ -9842,12 +9816,15 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt 
*bp) if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_QCAPS); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) { @@ -9867,52 +9844,64 @@ static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } int bnxt_hwrm_alloc_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_alloc_input req = {0}; - struct hwrm_wol_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_alloc_output *resp; + struct hwrm_wol_filter_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_ALLOC, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; - req.enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); - memcpy(req.mac_address, bp->dev->dev_addr, ETH_ALEN); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_ALLOC); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->wol_type = WOL_FILTER_ALLOC_REQ_WOL_TYPE_MAGICPKT; + req->enables = cpu_to_le32(WOL_FILTER_ALLOC_REQ_ENABLES_MAC_ADDRESS); + memcpy(req->mac_address, bp->dev->dev_addr, ETH_ALEN); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) bp->wol_filter_id = resp->wol_filter_id; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } int bnxt_hwrm_free_wol_fltr(struct bnxt *bp) { - struct hwrm_wol_filter_free_input req = {0}; + struct hwrm_wol_filter_free_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_FREE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); + req->wol_filter_id = bp->wol_filter_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_FREE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.enables = cpu_to_le32(WOL_FILTER_FREE_REQ_ENABLES_WOL_FILTER_ID); - req.wol_filter_id = bp->wol_filter_id; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static u16 bnxt_hwrm_get_wol_fltrs(struct bnxt *bp, u16 handle) { - struct hwrm_wol_filter_qcfg_input req = {0}; - struct hwrm_wol_filter_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_wol_filter_qcfg_output *resp; + struct hwrm_wol_filter_qcfg_input *req; u16 next_handle = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_WOL_FILTER_QCFG, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.handle = cpu_to_le16(handle); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_WOL_FILTER_QCFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->handle = cpu_to_le16(handle); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { next_handle = le16_to_cpu(resp->next_handle); if (next_handle != 0) { @@ -9923,7 +9912,7 @@ static u16 bnxt_hwrm_get_wol_fltrs(struct 
bnxt *bp, u16 handle) } } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return next_handle; } @@ -9944,19 +9933,20 @@ static void bnxt_get_wol_settings(struct bnxt *bp) static ssize_t bnxt_show_temp(struct device *dev, struct device_attribute *devattr, char *buf) { - struct hwrm_temp_monitor_query_input req = {0}; struct hwrm_temp_monitor_query_output *resp; + struct hwrm_temp_monitor_query_input *req; struct bnxt *bp = dev_get_drvdata(dev); u32 len = 0; int rc; - resp = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (rc) + return rc; + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (rc) return rc; return len; @@ -9979,12 +9969,13 @@ static void bnxt_hwmon_close(struct bnxt *bp) static void bnxt_hwmon_open(struct bnxt *bp) { - struct hwrm_temp_monitor_query_input req = {0}; + struct hwrm_temp_monitor_query_input *req; struct pci_dev *pdev = bp->pdev; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_TEMP_MONITOR_QUERY); + if (!rc) + rc = hwrm_req_send_silent(bp, req); if (rc == -EACCES || rc == -EOPNOTSUPP) { bnxt_hwmon_close(bp); return; @@ -10209,7 +10200,9 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) bnxt_tx_enable(bp); mod_timer(&bp->timer, jiffies + bp->current_interval); /* Poll link status and check for SFP+ module status */ + mutex_lock(&bp->link_lock); bnxt_get_port_module_status(bp); + mutex_unlock(&bp->link_lock); /* VF-reps may need to be re-opened after the PF is re-opened */ if (BNXT_PF(bp)) @@ -10316,15 +10309,9 @@ static int bnxt_open(struct net_device *dev) if (rc) return rc; - if (bnxt_ptp_init(bp)) { - netdev_warn(dev, "PTP initialization failed.\n"); - kfree(bp->ptp_cfg); - bp->ptp_cfg = NULL; - } rc = __bnxt_open_nic(bp, true, true); if (rc) { bnxt_hwrm_if_change(bp, false); - bnxt_ptp_clear(bp); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -10415,7 +10402,6 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); - bnxt_ptp_clear(bp); bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); @@ -10426,53 +10412,60 @@ static int bnxt_close(struct net_device *dev) static int bnxt_hwrm_port_phy_read(struct bnxt *bp, u16 phy_addr, u16 reg, u16 *val) { - struct hwrm_port_phy_mdio_read_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_mdio_read_input req = {0}; + struct hwrm_port_phy_mdio_read_output *resp; + struct hwrm_port_phy_mdio_read_input *req; int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_READ, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_READ); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = 
mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *val = le16_to_cpu(resp->reg_data); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_port_phy_write(struct bnxt *bp, u16 phy_addr, u16 reg, u16 val) { - struct hwrm_port_phy_mdio_write_input req = {0}; + struct hwrm_port_phy_mdio_write_input *req; + int rc; if (bp->hwrm_spec_code < 0x10a00) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_MDIO_WRITE, -1, -1); - req.port_id = cpu_to_le16(bp->pf.port_id); - req.phy_addr = phy_addr; - req.reg_addr = cpu_to_le16(reg & 0x1f); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_MDIO_WRITE); + if (rc) + return rc; + + req->port_id = cpu_to_le16(bp->pf.port_id); + req->phy_addr = phy_addr; + req->reg_addr = cpu_to_le16(reg & 0x1f); if (mdio_phy_id_is_c45(phy_addr)) { - req.cl45_mdio = 1; - req.phy_addr = mdio_phy_id_prtad(phy_addr); - req.dev_addr = mdio_phy_id_devad(phy_addr); - req.reg_addr = cpu_to_le16(reg); + req->cl45_mdio = 1; + req->phy_addr = mdio_phy_id_prtad(phy_addr); + req->dev_addr = mdio_phy_id_devad(phy_addr); + req->reg_addr = cpu_to_le16(reg); } - req.reg_data = cpu_to_le16(val); + req->reg_data = cpu_to_le16(val); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* rtnl_lock held */ @@ -10551,6 +10544,10 @@ static void bnxt_get_ring_stats(struct bnxt *bp, stats->multicast += BNXT_GET_RING_STATS64(sw, rx_mcast_pkts); stats->tx_dropped += BNXT_GET_RING_STATS64(sw, tx_error_pkts); + + stats->rx_dropped += + cpr->sw_stats.rx.rx_netpoll_discards + + cpr->sw_stats.rx.rx_oom_discards; } } @@ -10565,6 +10562,7 @@ static void bnxt_add_prev_stats(struct bnxt *bp, stats->tx_bytes += prev_stats->tx_bytes; stats->rx_missed_errors += prev_stats->rx_missed_errors; stats->multicast += prev_stats->multicast; + stats->rx_dropped += prev_stats->rx_dropped; stats->tx_dropped += prev_stats->tx_dropped; } @@ -10709,6 +10707,7 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) { struct net_device *dev = bp->dev; struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + struct hwrm_cfa_l2_filter_free_input *req; struct netdev_hw_addr *ha; int i, off = 0, rc; bool uc_update; @@ -10720,19 +10719,16 @@ static int bnxt_cfg_rx_mode(struct bnxt *bp) if (!uc_update) goto skip_uc; - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_CFA_L2_FILTER_FREE); + if (rc) + return rc; + hwrm_req_hold(bp, req); for (i = 1; i < vnic->uc_filter_count; i++) { - struct hwrm_cfa_l2_filter_free_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_L2_FILTER_FREE, -1, - -1); + req->l2_filter_id = vnic->fw_l2_filter_id[i]; - req.l2_filter_id = vnic->fw_l2_filter_id[i]; - - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); vnic->uc_filter_count = 1; @@ -11084,22 +11080,30 @@ static netdev_features_t bnxt_features_check(struct sk_buff *skb, int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, u32 *reg_buf) { - struct hwrm_dbg_read_direct_output *resp = bp->hwrm_cmd_resp_addr; - struct 
hwrm_dbg_read_direct_input req = {0}; + struct hwrm_dbg_read_direct_output *resp; + struct hwrm_dbg_read_direct_input *req; __le32 *dbg_reg_buf; dma_addr_t mapping; int rc, i; - dbg_reg_buf = dma_alloc_coherent(&bp->pdev->dev, num_words * 4, - &mapping, GFP_KERNEL); - if (!dbg_reg_buf) - return -ENOMEM; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_READ_DIRECT, -1, -1); - req.host_dest_addr = cpu_to_le64(mapping); - req.read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); - req.read_len32 = cpu_to_le32(num_words); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_DBG_READ_DIRECT); + if (rc) + return rc; + + dbg_reg_buf = hwrm_req_dma_slice(bp, req, num_words * 4, + &mapping); + if (!dbg_reg_buf) { + rc = -ENOMEM; + goto dbg_rd_reg_exit; + } + + req->host_dest_addr = cpu_to_le64(mapping); + + resp = hwrm_req_hold(bp, req); + req->read_addr = cpu_to_le32(reg_off + CHIMP_REG_VIEW_ADDR); + req->read_len32 = cpu_to_le32(num_words); + + rc = hwrm_req_send(bp, req); if (rc || resp->error_code) { rc = -EIO; goto dbg_rd_reg_exit; @@ -11108,28 +11112,30 @@ int bnxt_dbg_hwrm_rd_reg(struct bnxt *bp, u32 reg_off, u16 num_words, reg_buf[i] = le32_to_cpu(dbg_reg_buf[i]); dbg_rd_reg_exit: - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, num_words * 4, dbg_reg_buf, mapping); + hwrm_req_drop(bp, req); return rc; } static int bnxt_dbg_hwrm_ring_info_get(struct bnxt *bp, u8 ring_type, u32 ring_id, u32 *prod, u32 *cons) { - struct hwrm_dbg_ring_info_get_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_dbg_ring_info_get_input req = {0}; + struct hwrm_dbg_ring_info_get_output *resp; + struct hwrm_dbg_ring_info_get_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_RING_INFO_GET, -1, -1); - req.ring_type = ring_type; - req.fw_ring_id = cpu_to_le32(ring_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_DBG_RING_INFO_GET); + if (rc) + return rc; + + req->ring_type = ring_type; + req->fw_ring_id = cpu_to_le32(ring_id); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *prod = le32_to_cpu(resp->producer_index); *cons = le32_to_cpu(resp->consumer_index); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -11187,18 +11193,22 @@ static void bnxt_dbg_dump_states(struct bnxt *bp) static int bnxt_hwrm_rx_ring_reset(struct bnxt *bp, int ring_nr) { struct bnxt_rx_ring_info *rxr = &bp->rx_ring[ring_nr]; - struct hwrm_ring_reset_input req = {0}; + struct hwrm_ring_reset_input *req; struct bnxt_napi *bnapi = rxr->bnapi; struct bnxt_cp_ring_info *cpr; u16 cp_ring_id; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_RING_RESET); + if (rc) + return rc; cpr = &bnapi->cp_ring; cp_ring_id = cpr->cp_ring_struct.fw_ring_id; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_RESET, cp_ring_id, -1); - req.ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; - req.ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); - return hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->cmpl_ring = cpu_to_le16(cp_ring_id); + req->ring_type = RING_RESET_REQ_RING_TYPE_RX_RING_GRP; + req->ring_id = cpu_to_le16(bp->grp_info[bnapi->index].fw_grp_id); + return hwrm_req_send_silent(bp, req); } static void bnxt_reset_task(struct bnxt *bp, bool silent) @@ -11405,7 +11415,6 @@ static void bnxt_fw_reset_close(struct bnxt *bp) bnxt_clear_int_mode(bp); 
pci_disable_device(bp->pdev); } - bnxt_ptp_clear(bp); __bnxt_close_nic(bp, true, false); bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); @@ -11441,13 +11450,20 @@ static bool is_bnxt_fw_ok(struct bnxt *bp) static void bnxt_force_fw_reset(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; u32 wait_dsecs; if (!test_bit(BNXT_STATE_OPEN, &bp->state) || test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) return; - set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (ptp) { + spin_lock_bh(&ptp->ptp_lock); + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + spin_unlock_bh(&ptp->ptp_lock); + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } bnxt_fw_reset_close(bp); wait_dsecs = fw_health->master_func_wait_dsecs; if (fw_health->master) { @@ -11503,9 +11519,16 @@ void bnxt_fw_reset(struct bnxt *bp) bnxt_rtnl_lock_sp(bp); if (test_bit(BNXT_STATE_OPEN, &bp->state) && !test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; int n = 0, tmo; - set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (ptp) { + spin_lock_bh(&ptp->ptp_lock); + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + spin_unlock_bh(&ptp->ptp_lock); + } else { + set_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + } if (bp->pf.active_vfs && !test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) n = bnxt_get_registered_vfs(bp); @@ -11614,12 +11637,15 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp) static void bnxt_fw_echo_reply(struct bnxt *bp) { struct bnxt_fw_health *fw_health = bp->fw_health; - struct hwrm_func_echo_response_input req = {0}; + struct hwrm_func_echo_response_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1); - req.event_data1 = cpu_to_le32(fw_health->echo_req_data1); - req.event_data2 = cpu_to_le32(fw_health->echo_req_data2); - hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_ECHO_RESPONSE); + if (rc) + return; + req->event_data1 = cpu_to_le32(fw_health->echo_req_data1); + req->event_data2 = cpu_to_le32(fw_health->echo_req_data2); + hwrm_req_send(bp, req); } static void bnxt_sp_task(struct work_struct *work) @@ -11824,18 +11850,6 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp) return rc; } - if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { - rc = bnxt_alloc_kong_hwrm_resources(bp); - if (rc) - bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; - } - - if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || - bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { - rc = bnxt_alloc_hwrm_short_cmd_req(bp); - if (rc) - return rc; - } bnxt_nvm_cfg_ver_get(bp); rc = bnxt_hwrm_func_reset(bp); @@ -12010,14 +12024,16 @@ static void bnxt_reset_all(struct bnxt *bp) for (i = 0; i < fw_health->fw_reset_seq_cnt; i++) bnxt_fw_reset_writel(bp, i); } else if (fw_health->flags & ERROR_RECOVERY_QCFG_RESP_FLAGS_CO_CPU) { - struct hwrm_fw_reset_input req = {0}; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); - req.resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); - req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; - req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; - req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + struct hwrm_fw_reset_input *req; + + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (!rc) { + req->target_id = cpu_to_le16(HWRM_TARGET_ID_KONG); + req->embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP; + req->selfrst_status = 
FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP; + req->flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL; + rc = hwrm_req_send(bp, req); + } if (rc != -ENODEV) netdev_warn(bp->dev, "Unable to reset FW rc=%d\n", rc); } @@ -12144,7 +12160,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) fallthrough; case BNXT_FW_RESET_STATE_POLL_FW: bp->hwrm_cmd_timeout = SHORT_HWRM_CMD_TIMEOUT; - rc = __bnxt_hwrm_ver_get(bp, true); + rc = bnxt_hwrm_poll(bp); if (rc) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted\n"); @@ -12177,6 +12193,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) bnxt_reenable_sriov(bp); bnxt_vf_reps_alloc(bp); bnxt_vf_reps_open(bp); + bnxt_ptp_reapply_pps(bp); bnxt_dl_health_recovery_done(bp); bnxt_dl_health_status_update(bp, true); rtnl_unlock(); @@ -12708,7 +12725,7 @@ static const struct net_device_ops bnxt_netdev_ops = { .ndo_stop = bnxt_close, .ndo_get_stats64 = bnxt_get_stats64, .ndo_set_rx_mode = bnxt_set_rx_mode, - .ndo_do_ioctl = bnxt_ioctl, + .ndo_eth_ioctl = bnxt_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = bnxt_change_mac_addr, .ndo_change_mtu = bnxt_change_mtu, @@ -12747,6 +12764,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port); + bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); @@ -12762,7 +12780,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); bnxt_free_hwrm_resources(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_ethtool_free(bp); bnxt_dcb_free(bp); kfree(bp->edev); @@ -12800,8 +12817,10 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) if (!fw_dflt) return 0; + mutex_lock(&bp->link_lock); rc = bnxt_update_link(bp, false); if (rc) { + mutex_unlock(&bp->link_lock); netdev_err(bp->dev, "Probe phy can't update link (rc: %x)\n", rc); return rc; @@ -12814,6 +12833,7 @@ static int bnxt_probe_phy(struct bnxt *bp, bool fw_dflt) link_info->support_auto_speeds = link_info->support_speeds; bnxt_init_ethtool_link_settings(bp); + mutex_unlock(&bp->link_lock); return 0; } @@ -13085,6 +13105,12 @@ static void bnxt_vpd_read_info(struct bnxt *bp) goto exit; } + i = pci_vpd_find_tag(vpd_data, vpd_size, PCI_VPD_LRDT_RO_DATA); + if (i < 0) { + netdev_err(bp->dev, "VPD READ-Only not found\n"); + goto exit; + } + ro_size = pci_vpd_lrdt_size(&vpd_data[i]); i += PCI_VPD_LRDT_TAG_SIZE; if (i + ro_size > vpd_size) @@ -13356,9 +13382,9 @@ init_err_cleanup: init_err_pci_clean: bnxt_hwrm_func_drv_unrgtr(bp); - bnxt_free_hwrm_short_cmd_req(bp); bnxt_free_hwrm_resources(bp); bnxt_ethtool_free(bp); + bnxt_ptp_clear(bp); kfree(bp->ptp_cfg); bp->ptp_cfg = NULL; kfree(bp->fw_health); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index ba4e0fc38520..a8212dcdad5f 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -496,6 +496,16 @@ struct rx_tpa_end_cmp_ext { !!((data1) & \ ASYNC_EVENT_CMPL_ERROR_RECOVERY_EVENT_DATA1_FLAGS_RECOVERY_ENABLED) +#define BNXT_EVENT_ERROR_REPORT_TYPE(data1) \ + (((data1) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_MASK) >>\ + ASYNC_EVENT_CMPL_ERROR_REPORT_BASE_EVENT_DATA1_ERROR_TYPE_SFT) + +#define BNXT_EVENT_INVALID_SIGNAL_DATA(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_MASK) >>\ + 
ASYNC_EVENT_CMPL_ERROR_REPORT_INVALID_SIGNAL_EVENT_DATA2_PIN_ID_SFT) + struct nqe_cn { __le16 type; #define NQ_CN_TYPE_MASK 0x3fUL @@ -586,15 +596,17 @@ struct nqe_cn { #define MAX_TPA_SEGS_P5 0x3f #if (BNXT_PAGE_SHIFT == 16) -#define MAX_RX_PAGES 1 +#define MAX_RX_PAGES_AGG_ENA 1 +#define MAX_RX_PAGES 4 #define MAX_RX_AGG_PAGES 4 #define MAX_TX_PAGES 1 -#define MAX_CP_PAGES 8 +#define MAX_CP_PAGES 16 #else -#define MAX_RX_PAGES 8 +#define MAX_RX_PAGES_AGG_ENA 8 +#define MAX_RX_PAGES 32 #define MAX_RX_AGG_PAGES 32 #define MAX_TX_PAGES 8 -#define MAX_CP_PAGES 64 +#define MAX_CP_PAGES 128 #endif #define RX_DESC_CNT (BNXT_PAGE_SIZE / sizeof(struct rx_bd)) @@ -612,6 +624,7 @@ struct nqe_cn { #define HW_CMPD_RING_SIZE (sizeof(struct tx_cmp) * CP_DESC_CNT) #define BNXT_MAX_RX_DESC_CNT (RX_DESC_CNT * MAX_RX_PAGES - 1) +#define BNXT_MAX_RX_DESC_CNT_JUM_ENA (RX_DESC_CNT * MAX_RX_PAGES_AGG_ENA - 1) #define BNXT_MAX_RX_JUM_DESC_CNT (RX_DESC_CNT * MAX_RX_AGG_PAGES - 1) #define BNXT_MAX_TX_DESC_CNT (TX_DESC_CNT * MAX_TX_PAGES - 1) @@ -656,37 +669,7 @@ struct nqe_cn { #define RING_CMP(idx) ((idx) & bp->cp_ring_mask) #define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) -#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) -#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) #define DFLT_HWRM_CMD_TIMEOUT 500 -#define HWRM_CMD_MAX_TIMEOUT 40000 -#define SHORT_HWRM_CMD_TIMEOUT 20 -#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) -#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) -#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) -#define BNXT_HWRM_REQ_MAX_SIZE 128 -#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ - BNXT_HWRM_REQ_MAX_SIZE) -#define HWRM_SHORT_MIN_TIMEOUT 3 -#define HWRM_SHORT_MAX_TIMEOUT 10 -#define HWRM_SHORT_TIMEOUT_COUNTER 5 - -#define HWRM_MIN_TIMEOUT 25 -#define HWRM_MAX_TIMEOUT 40 - -#define HWRM_WAIT_MUST_ABORT(bp, req) \ - (le16_to_cpu((req)->req_type) != HWRM_VER_GET && \ - !bnxt_is_fw_healthy(bp)) - -#define HWRM_TOTAL_TIMEOUT(n) (((n) <= HWRM_SHORT_TIMEOUT_COUNTER) ? 
\ - ((n) * HWRM_SHORT_MIN_TIMEOUT) : \ - (HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + \ - ((n) - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT)) - -#define HWRM_VALID_BIT_DELAY_USEC 150 - -#define BNXT_HWRM_CHNL_CHIMP 0 -#define BNXT_HWRM_CHNL_KONG 1 #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 @@ -926,6 +909,8 @@ struct bnxt_rx_sw_stats { u64 rx_l4_csum_errors; u64 rx_resets; u64 rx_buf_errors; + u64 rx_oom_discards; + u64 rx_netpoll_discards; }; struct bnxt_cmn_sw_stats { @@ -963,11 +948,11 @@ struct bnxt_cp_ring_info { struct dim dim; union { - struct tx_cmp *cp_desc_ring[MAX_CP_PAGES]; - struct nqe_cn *nq_desc_ring[MAX_CP_PAGES]; + struct tx_cmp **cp_desc_ring; + struct nqe_cn **nq_desc_ring; }; - dma_addr_t cp_desc_mapping[MAX_CP_PAGES]; + dma_addr_t *cp_desc_mapping; struct bnxt_stats_mem stats; u32 hw_stats_ctx_id; @@ -1888,19 +1873,15 @@ struct bnxt { #define BNXT_FW_CAP_VLAN_RX_STRIP 0x01000000 #define BNXT_FW_CAP_VLAN_TX_INSERT 0x02000000 #define BNXT_FW_CAP_EXT_HW_STATS_SUPPORTED 0x04000000 + #define BNXT_FW_CAP_PTP_PPS 0x10000000 #define BNXT_FW_CAP_RING_MONITOR 0x40000000 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; - u16 hwrm_intr_seq_id; - void *hwrm_short_cmd_req_addr; - dma_addr_t hwrm_short_cmd_req_dma_addr; - void *hwrm_cmd_resp_addr; - dma_addr_t hwrm_cmd_resp_dma_addr; - void *hwrm_cmd_kong_resp_addr; - dma_addr_t hwrm_cmd_kong_resp_dma_addr; + struct dma_pool *hwrm_dma_pool; + struct hlist_head hwrm_pending_list; struct rtnl_link_stats64 net_stats_prev; struct bnxt_stats_mem port_stats; @@ -2000,7 +1981,7 @@ struct bnxt { struct mutex sriov_lock; #endif -#if BITS_PER_LONG == 32 +#ifndef writeq /* ensure atomic 64-bit doorbell writes on 32-bit systems. 
*/ spinlock_t db_lock; #endif @@ -2129,7 +2110,7 @@ static inline u32 bnxt_tx_avail(struct bnxt *bp, struct bnxt_tx_ring_info *txr) ((txr->tx_prod - txr->tx_cons) & bp->tx_ring_mask); } -#if BITS_PER_LONG == 32 +#ifndef writeq #define writeq(val64, db) \ do { \ spin_lock(&bp->db_lock); \ @@ -2171,63 +2152,6 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } -static inline bool bnxt_cfa_hwrm_message(u16 req_type) -{ - switch (req_type) { - case HWRM_CFA_ENCAP_RECORD_ALLOC: - case HWRM_CFA_ENCAP_RECORD_FREE: - case HWRM_CFA_DECAP_FILTER_ALLOC: - case HWRM_CFA_DECAP_FILTER_FREE: - case HWRM_CFA_EM_FLOW_ALLOC: - case HWRM_CFA_EM_FLOW_FREE: - case HWRM_CFA_EM_FLOW_CFG: - case HWRM_CFA_FLOW_ALLOC: - case HWRM_CFA_FLOW_FREE: - case HWRM_CFA_FLOW_INFO: - case HWRM_CFA_FLOW_FLUSH: - case HWRM_CFA_FLOW_STATS: - case HWRM_CFA_METER_PROFILE_ALLOC: - case HWRM_CFA_METER_PROFILE_FREE: - case HWRM_CFA_METER_PROFILE_CFG: - case HWRM_CFA_METER_INSTANCE_ALLOC: - case HWRM_CFA_METER_INSTANCE_FREE: - return true; - default: - return false; - } -} - -static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type))); -} - -static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req) -{ - return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && - req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr)); -} - -static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req) -{ - if (bnxt_hwrm_kong_chnl(bp, (struct input *)req)) - return bp->hwrm_cmd_kong_resp_addr; - else - return bp->hwrm_cmd_resp_addr; -} - -static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst) -{ - u16 seq_id; - - if (dst == BNXT_HWRM_CHNL_CHIMP) - seq_id = bp->hwrm_cmd_seq++; - else - seq_id = bp->hwrm_cmd_kong_seq++; - return seq_id; -} - extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -2237,11 +2161,6 @@ u32 bnxt_fw_health_readl(struct bnxt *bp, int reg_idx); void bnxt_set_tpa_flags(struct bnxt *bp); void bnxt_set_ring_params(struct bnxt *); int bnxt_set_rx_skb_mode(struct bnxt *bp, bool page_mode); -void bnxt_hwrm_cmd_hdr_init(struct bnxt *, void *, u16, u16, u16); -int _hwrm_send_message(struct bnxt *, void *, u32, int); -int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout); -int hwrm_send_message(struct bnxt *, void *, u32, int); -int hwrm_send_message_silent(struct bnxt *, void *, u32, int); int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size, bool async_only); int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 8a68df4d9e59..228a5db7e143 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -18,6 +18,7 @@ #include <rdma/ib_verbs.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_dcb.h" #ifdef CONFIG_BNXT_DCB @@ -38,38 +39,43 @@ static int bnxt_queue_to_tc(struct bnxt *bp, u8 queue_id) static int bnxt_hwrm_queue_pri2cos_cfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_cfg_input req = {0}; + struct hwrm_queue_pri2cos_cfg_input *req; u8 *pri2cos; - int i; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_CFG, -1, -1); - 
req.flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | - QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); + req->flags = cpu_to_le32(QUEUE_PRI2COS_CFG_REQ_FLAGS_PATH_BIDIR | + QUEUE_PRI2COS_CFG_REQ_FLAGS_IVLAN); - pri2cos = &req.pri0_cos_queue_id; + pri2cos = &req->pri0_cos_queue_id; for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { u8 qidx; - req.enables |= cpu_to_le32( + req->enables |= cpu_to_le32( QUEUE_PRI2COS_CFG_REQ_ENABLES_PRI0_COS_QUEUE_ID << i); qidx = bp->tc_to_qidx[ets->prio_tc[i]]; pri2cos[i] = bp->q_info[qidx].queue_id; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_pri2cos_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pri2cos_qcfg_input req = {0}; - int rc = 0; + struct hwrm_queue_pri2cos_qcfg_output *resp; + struct hwrm_queue_pri2cos_qcfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); - req.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PRI2COS_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { u8 *pri2cos = &resp->pri0_cos_queue_id; int i; @@ -83,23 +89,26 @@ static int bnxt_hwrm_queue_pri2cos_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->prio_tc[i] = tc; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, u8 max_tc) { - struct hwrm_queue_cos2bw_cfg_input req = {0}; + struct hwrm_queue_cos2bw_cfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; - int i; + int rc, i; + + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_CFG, -1, -1); for (i = 0; i < max_tc; i++) { u8 qidx = bp->tc_to_qidx[i]; - req.enables |= cpu_to_le32( + req->enables |= cpu_to_le32( QUEUE_COS2BW_CFG_REQ_ENABLES_COS_QUEUE_ID0_VALID << qidx); @@ -120,30 +129,32 @@ static int bnxt_hwrm_queue_cos2bw_cfg(struct bnxt *bp, struct ieee_ets *ets, cpu_to_le32((ets->tc_tx_bw[i] * 100) | BW_VALUE_UNIT_PERCENT1_100); } - data = &req.unused_0 + qidx * (sizeof(cos2bw) - 4); + data = &req->unused_0 + qidx * (sizeof(cos2bw) - 4); memcpy(data, &cos2bw.queue_id, sizeof(cos2bw) - 4); if (qidx == 0) { - req.queue_id0 = cos2bw.queue_id; - req.unused_0 = 0; + req->queue_id0 = cos2bw.queue_id; + req->unused_0 = 0; } } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) { - struct hwrm_queue_cos2bw_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_cos2bw_qcfg_input req = {0}; + struct hwrm_queue_cos2bw_qcfg_output *resp; + struct hwrm_queue_cos2bw_qcfg_input *req; struct bnxt_cos2bw_cfg cos2bw; void *data; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_COS2BW_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_COS2BW_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -167,7 +178,7 
@@ static int bnxt_hwrm_queue_cos2bw_qcfg(struct bnxt *bp, struct ieee_ets *ets) ets->tc_tx_bw[tc] = cos2bw.bw_weight; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } @@ -229,11 +240,12 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask) static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_cfg_input req = {0}; + struct hwrm_queue_pfcenable_cfg_input *req; struct ieee_ets *my_ets = bp->ieee_ets; unsigned int tc_mask = 0, pri_mask = 0; u8 i, pri, lltc_count = 0; bool need_q_remap = false; + int rc; if (!my_ets) return -EINVAL; @@ -266,38 +278,43 @@ static int bnxt_hwrm_queue_pfc_cfg(struct bnxt *bp, struct ieee_pfc *pfc) if (need_q_remap) bnxt_queue_remap(bp, tc_mask); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_CFG, -1, -1); - req.flags = cpu_to_le32(pri_mask); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_CFG); + if (rc) + return rc; + + req->flags = cpu_to_le32(pri_mask); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_queue_pfc_qcfg(struct bnxt *bp, struct ieee_pfc *pfc) { - struct hwrm_queue_pfcenable_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_pfcenable_qcfg_input req = {0}; + struct hwrm_queue_pfcenable_qcfg_output *resp; + struct hwrm_queue_pfcenable_qcfg_input *req; u8 pri_mask; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_PFCENABLE_QCFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_PFCENABLE_QCFG); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } pri_mask = le32_to_cpu(resp->flags); pfc->pfc_en = pri_mask; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return 0; } static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_fw_set_structured_data_input set = {0}; - struct hwrm_fw_get_structured_data_input get = {0}; + struct hwrm_fw_set_structured_data_input *set; + struct hwrm_fw_get_structured_data_input *get; struct hwrm_struct_data_dcbx_app *fw_app; struct hwrm_struct_hdr *data; dma_addr_t mapping; @@ -307,19 +324,26 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10601) return 0; + rc = hwrm_req_init(bp, get, HWRM_FW_GET_STRUCTURED_DATA); + if (rc) + return rc; + + hwrm_req_hold(bp, get); + hwrm_req_alloc_flags(bp, get, GFP_KERNEL | __GFP_ZERO); + n = IEEE_8021QAZ_MAX_TCS; data_len = sizeof(*data) + sizeof(*fw_app) * n; - data = dma_alloc_coherent(&bp->pdev->dev, data_len, &mapping, - GFP_KERNEL); - if (!data) - return -ENOMEM; + data = hwrm_req_dma_slice(bp, get, data_len, &mapping); + if (!data) { + rc = -ENOMEM; + goto set_app_exit; + } - bnxt_hwrm_cmd_hdr_init(bp, &get, HWRM_FW_GET_STRUCTURED_DATA, -1, -1); - get.dest_data_addr = cpu_to_le64(mapping); - get.structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); - get.subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - get.count = 0; - rc = hwrm_send_message(bp, &get, sizeof(get), HWRM_CMD_TIMEOUT); + get->dest_data_addr = cpu_to_le64(mapping); + get->structure_id = cpu_to_le16(STRUCT_HDR_STRUCT_ID_DCBX_APP); + get->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); + get->count = 0; + rc = hwrm_req_send(bp, get); if (rc) goto 
set_app_exit; @@ -365,44 +389,49 @@ static int bnxt_hwrm_set_dcbx_app(struct bnxt *bp, struct dcb_app *app, data->len = cpu_to_le16(sizeof(*fw_app) * n); data->subtype = cpu_to_le16(HWRM_STRUCT_DATA_SUBTYPE_HOST_OPERATIONAL); - bnxt_hwrm_cmd_hdr_init(bp, &set, HWRM_FW_SET_STRUCTURED_DATA, -1, -1); - set.src_data_addr = cpu_to_le64(mapping); - set.data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); - set.hdr_cnt = 1; - rc = hwrm_send_message(bp, &set, sizeof(set), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, set, HWRM_FW_SET_STRUCTURED_DATA); + if (rc) + goto set_app_exit; + + set->src_data_addr = cpu_to_le64(mapping); + set->data_len = cpu_to_le16(sizeof(*data) + sizeof(*fw_app) * n); + set->hdr_cnt = 1; + rc = hwrm_req_send(bp, set); set_app_exit: - dma_free_coherent(&bp->pdev->dev, data_len, data, mapping); + hwrm_req_drop(bp, get); /* dropping get request and associated slice */ return rc; } static int bnxt_hwrm_queue_dscp_qcaps(struct bnxt *bp) { - struct hwrm_queue_dscp_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_queue_dscp_qcaps_input req = {0}; + struct hwrm_queue_dscp_qcaps_output *resp; + struct hwrm_queue_dscp_qcaps_input *req; int rc; bp->max_dscp_value = 0; if (bp->hwrm_spec_code < 0x10800 || BNXT_VF(bp)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { bp->max_dscp_value = (1 << resp->num_dscp_bits) - 1; if (bp->max_dscp_value < 0x3f) bp->max_dscp_value = 0; } - - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, bool add) { - struct hwrm_queue_dscp2pri_cfg_input req = {0}; + struct hwrm_queue_dscp2pri_cfg_input *req; struct bnxt_dscp2pri_entry *dscp2pri; dma_addr_t mapping; int rc; @@ -410,23 +439,25 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, if (bp->hwrm_spec_code < 0x10800) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_QUEUE_DSCP2PRI_CFG, -1, -1); - dscp2pri = dma_alloc_coherent(&bp->pdev->dev, sizeof(*dscp2pri), - &mapping, GFP_KERNEL); - if (!dscp2pri) + rc = hwrm_req_init(bp, req, HWRM_QUEUE_DSCP2PRI_CFG); + if (rc) + return rc; + + dscp2pri = hwrm_req_dma_slice(bp, req, sizeof(*dscp2pri), &mapping); + if (!dscp2pri) { + hwrm_req_drop(bp, req); return -ENOMEM; + } - req.src_data_addr = cpu_to_le64(mapping); + req->src_data_addr = cpu_to_le64(mapping); dscp2pri->dscp = app->protocol; if (add) dscp2pri->mask = 0x3f; else dscp2pri->mask = 0; dscp2pri->pri = app->priority; - req.entry_cnt = cpu_to_le16(1); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - dma_free_coherent(&bp->pdev->dev, sizeof(*dscp2pri), dscp2pri, - mapping); + req->entry_cnt = cpu_to_le16(1); + rc = hwrm_req_send(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index 64381be935a8..1423cc617d93 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -12,6 +12,7 @@ #include <net/devlink.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_ethtool.h" @@ -354,28 +355,34 @@ static void 
bnxt_copy_from_nvm_data(union devlink_param_value *dst, static int bnxt_hwrm_get_nvm_cfg_ver(struct bnxt *bp, union devlink_param_value *nvm_cfg_ver) { - struct hwrm_nvm_get_variable_input req = {0}; + struct hwrm_nvm_get_variable_input *req; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) - return -ENOMEM; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + if (!data) { + rc = -ENOMEM; + goto exit; + } - req.dest_data_addr = cpu_to_le64(data_dma_addr); - req.data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); - req.option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); + hwrm_req_hold(bp, req); + req->dest_data_addr = cpu_to_le64(data_dma_addr); + req->data_len = cpu_to_le16(BNXT_NVM_CFG_VER_BITS); + req->option_num = cpu_to_le16(NVM_OFF_NVM_CFG_VER); - rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, req); if (!rc) bnxt_copy_from_nvm_data(nvm_cfg_ver, data, BNXT_NVM_CFG_VER_BITS, BNXT_NVM_CFG_VER_BYTES); - dma_free_coherent(&bp->pdev->dev, sizeof(*data), data, data_dma_addr); +exit: + hwrm_req_drop(bp, req); return rc; } @@ -562,17 +569,20 @@ static int bnxt_dl_info_get(struct devlink *dl, struct devlink_info_req *req, } static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, - int msg_len, union devlink_param_value *val) + union devlink_param_value *val) { struct hwrm_nvm_get_variable_input *req = msg; struct bnxt_dl_nvm_param nvm_param; + struct hwrm_err_output *resp; union bnxt_nvm_data *data; dma_addr_t data_dma_addr; int idx = 0, rc, i; /* Get/Set NVM CFG parameter is supported only on PFs */ - if (BNXT_VF(bp)) + if (BNXT_VF(bp)) { + hwrm_req_drop(bp, req); return -EPERM; + } for (i = 0; i < ARRAY_SIZE(nvm_params); i++) { if (nvm_params[i].id == param_id) { @@ -581,18 +591,22 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, } } - if (i == ARRAY_SIZE(nvm_params)) + if (i == ARRAY_SIZE(nvm_params)) { + hwrm_req_drop(bp, req); return -EOPNOTSUPP; + } if (nvm_param.dir_type == BNXT_NVM_PORT_CFG) idx = bp->pf.port_id; else if (nvm_param.dir_type == BNXT_NVM_FUNC_CFG) idx = bp->pf.fw_fid - BNXT_FIRST_PF_FID; - data = dma_alloc_coherent(&bp->pdev->dev, sizeof(*data), - &data_dma_addr, GFP_KERNEL); - if (!data) + data = hwrm_req_dma_slice(bp, req, sizeof(*data), &data_dma_addr); + + if (!data) { + hwrm_req_drop(bp, req); return -ENOMEM; + } req->dest_data_addr = cpu_to_le64(data_dma_addr); req->data_len = cpu_to_le16(nvm_param.nvm_num_bits); @@ -601,26 +615,24 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, if (idx) req->dimensions = cpu_to_le16(1); + resp = hwrm_req_hold(bp, req); if (req->req_type == cpu_to_le16(HWRM_NVM_SET_VARIABLE)) { bnxt_copy_to_nvm_data(data, val, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); - rc = hwrm_send_message(bp, msg, msg_len, HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, msg); } else { - rc = hwrm_send_message_silent(bp, msg, msg_len, - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, msg); if (!rc) { bnxt_copy_from_nvm_data(val, data, nvm_param.nvm_num_bits, nvm_param.dl_num_bytes); } else { - struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; - if (resp->cmd_err == NVM_GET_VARIABLE_CMD_ERR_CODE_VAR_NOT_EXIST) rc = -EOPNOTSUPP; } } - dma_free_coherent(&bp->pdev->dev, 
sizeof(*data), data, data_dma_addr); + hwrm_req_drop(bp, req); if (rc == -EACCES) netdev_err(bp->dev, "PF does not have admin privileges to modify NVM config\n"); return rc; @@ -629,15 +641,17 @@ static int bnxt_hwrm_nvm_req(struct bnxt *bp, u32 param_id, void *msg, static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_get_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_get_variable_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_VARIABLE, -1, -1); - rc = bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); - if (!rc) - if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) - ctx->val.vbool = !ctx->val.vbool; + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_VARIABLE); + if (rc) + return rc; + + rc = bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); + if (!rc && id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) + ctx->val.vbool = !ctx->val.vbool; return rc; } @@ -645,15 +659,18 @@ static int bnxt_dl_nvm_param_get(struct devlink *dl, u32 id, static int bnxt_dl_nvm_param_set(struct devlink *dl, u32 id, struct devlink_param_gset_ctx *ctx) { - struct hwrm_nvm_set_variable_input req = {0}; struct bnxt *bp = bnxt_get_bp_from_dl(dl); + struct hwrm_nvm_set_variable_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_SET_VARIABLE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_SET_VARIABLE); + if (rc) + return rc; if (id == BNXT_DEVLINK_PARAM_ID_GRE_VER_CHECK) ctx->val.vbool = !ctx->val.vbool; - return bnxt_hwrm_nvm_req(bp, id, &req, sizeof(req), &ctx->val); + return bnxt_hwrm_nvm_req(bp, id, req, &ctx->val); } static int bnxt_dl_msix_validate(struct devlink *dl, u32 id, @@ -743,14 +760,17 @@ static void bnxt_dl_params_unregister(struct bnxt *bp) int bnxt_dl_register(struct bnxt *bp) { + const struct devlink_ops *devlink_ops; struct devlink_port_attrs attrs = {}; struct devlink *dl; int rc; if (BNXT_PF(bp)) - dl = devlink_alloc(&bnxt_dl_ops, sizeof(struct bnxt_dl)); + devlink_ops = &bnxt_dl_ops; else - dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl)); + devlink_ops = &bnxt_vf_dl_ops; + + dl = devlink_alloc(devlink_ops, sizeof(struct bnxt_dl), &bp->pdev->dev); if (!dl) { netdev_warn(bp->dev, "devlink_alloc failed\n"); return -ENOMEM; @@ -763,7 +783,7 @@ int bnxt_dl_register(struct bnxt *bp) bp->hwrm_spec_code > 0x10803) bp->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY; - rc = devlink_register(dl, &bp->pdev->dev); + rc = devlink_register(dl); if (rc) { netdev_warn(bp->dev, "devlink_register failed. 
rc=%d\n", rc); goto err_dl_free; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 786ca51e669b..b056e3c29bbd 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -24,6 +24,7 @@ #include <linux/timecounter.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_xdp.h" #include "bnxt_ptp.h" #include "bnxt_ethtool.h" @@ -49,7 +50,9 @@ static void bnxt_set_msglevel(struct net_device *dev, u32 value) } static int bnxt_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); struct bnxt_coal *hw_coal; @@ -79,7 +82,9 @@ static int bnxt_get_coalesce(struct net_device *dev, } static int bnxt_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); bool update_stats = false; @@ -303,6 +308,7 @@ static const char * const bnxt_cmn_sw_stats_str[] = { enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, + RX_NETPOLL_DISCARDS, }; static struct { @@ -311,6 +317,7 @@ static struct { } bnxt_sw_func_stats[] = { {0, "rx_total_discard_pkts"}, {0, "tx_total_discard_pkts"}, + {0, "rx_total_netpoll_discards"}, }; #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str) @@ -599,6 +606,8 @@ skip_tpa_ring_stats: BNXT_GET_RING_STATS64(sw_stats, rx_discard_pkts); bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter += BNXT_GET_RING_STATS64(sw_stats, tx_discard_pkts); + bnxt_sw_func_stats[RX_NETPOLL_DISCARDS].counter += + cpr->sw_stats.rx.rx_netpoll_discards; } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) @@ -768,8 +777,13 @@ static void bnxt_get_ringparam(struct net_device *dev, { struct bnxt *bp = netdev_priv(dev); - ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; - ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; + if (bp->flags & BNXT_FLAG_AGG_RINGS) { + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT_JUM_ENA; + ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT; + } else { + ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT; + ering->rx_jumbo_max_pending = 0; + } ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT; ering->rx_pending = bp->rx_ring_size; @@ -1352,7 +1366,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p) { struct pcie_ctx_hw_stats *hw_pcie_stats; - struct hwrm_pcie_qstats_input req = {0}; + struct hwrm_pcie_qstats_input *req; struct bnxt *bp = netdev_priv(dev); dma_addr_t hw_pcie_stats_addr; int rc; @@ -1363,18 +1377,21 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, if (!(bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED)) return; - hw_pcie_stats = dma_alloc_coherent(&bp->pdev->dev, - sizeof(*hw_pcie_stats), - &hw_pcie_stats_addr, GFP_KERNEL); - if (!hw_pcie_stats) + if (hwrm_req_init(bp, req, HWRM_PCIE_QSTATS)) return; + hw_pcie_stats = hwrm_req_dma_slice(bp, req, sizeof(*hw_pcie_stats), + &hw_pcie_stats_addr); + if (!hw_pcie_stats) { + hwrm_req_drop(bp, req); + return; + } + regs->version = 1; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PCIE_QSTATS, -1, -1); - req.pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); - req.pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, 
sizeof(req), HWRM_CMD_TIMEOUT); + hwrm_req_hold(bp, req); /* hold on to slice */ + req->pcie_stat_size = cpu_to_le16(sizeof(*hw_pcie_stats)); + req->pcie_stat_host_addr = cpu_to_le64(hw_pcie_stats_addr); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *src = (__le64 *)hw_pcie_stats; u64 *dst = (u64 *)(_p + BNXT_PXP_REG_LEN); @@ -1383,9 +1400,7 @@ static void bnxt_get_regs(struct net_device *dev, struct ethtool_regs *regs, for (i = 0; i < sizeof(*hw_pcie_stats) / sizeof(__le64); i++) dst[i] = le64_to_cpu(src[i]); } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, sizeof(*hw_pcie_stats), hw_pcie_stats, - hw_pcie_stats_addr); + hwrm_req_drop(bp, req); } static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) @@ -1965,7 +1980,7 @@ static u32 bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info *link_info, static int bnxt_set_fecparam(struct net_device *dev, struct ethtool_fecparam *fecparam) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_link_info *link_info; u32 new_cfg, fec = fecparam->fec; @@ -1997,9 +2012,11 @@ static int bnxt_set_fecparam(struct net_device *dev, } apply_fec: - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); - req.flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; + req->flags = cpu_to_le32(new_cfg | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); + rc = hwrm_req_send(bp, req); /* update current settings */ if (!rc) { mutex_lock(&bp->link_lock); @@ -2093,19 +2110,22 @@ static u32 bnxt_get_link(struct net_device *dev) int bnxt_hwrm_nvm_get_dev_info(struct bnxt *bp, struct hwrm_nvm_get_dev_info_output *nvm_dev_info) { - struct hwrm_nvm_get_dev_info_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_nvm_get_dev_info_input req = {0}; + struct hwrm_nvm_get_dev_info_output *resp; + struct hwrm_nvm_get_dev_info_input *req; int rc; if (BNXT_VF(bp)) return -EOPNOTSUPP; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DEV_INFO, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DEV_INFO); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(nvm_dev_info, resp, sizeof(*resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2118,77 +2138,67 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length); -static int __bnxt_flash_nvram(struct net_device *dev, u16 dir_type, - u16 dir_ordinal, u16 dir_ext, u16 dir_attr, - u32 dir_item_len, const u8 *data, - size_t data_len) +static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, + u16 dir_ordinal, u16 dir_ext, u16 dir_attr, + u32 dir_item_len, const u8 *data, + size_t data_len) { struct bnxt *bp = netdev_priv(dev); + struct hwrm_nvm_write_input *req; int rc; - struct hwrm_nvm_write_input req = {0}; - dma_addr_t dma_handle; - u8 *kmem = NULL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_WRITE); + if (rc) + return rc; - req.dir_type = cpu_to_le16(dir_type); - req.dir_ordinal = cpu_to_le16(dir_ordinal); - req.dir_ext = cpu_to_le16(dir_ext); - req.dir_attr = cpu_to_le16(dir_attr); - req.dir_item_length = cpu_to_le32(dir_item_len); if 
(data_len && data) { - req.dir_data_length = cpu_to_le32(data_len); + dma_addr_t dma_handle; + u8 *kmem; - kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle, - GFP_KERNEL); - if (!kmem) + kmem = hwrm_req_dma_slice(bp, req, data_len, &dma_handle); + if (!kmem) { + hwrm_req_drop(bp, req); return -ENOMEM; + } + + req->dir_data_length = cpu_to_le32(data_len); memcpy(kmem, data, data_len); - req.host_src_addr = cpu_to_le64(dma_handle); + req->host_src_addr = cpu_to_le64(dma_handle); } - rc = _hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); - if (kmem) - dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + hwrm_req_timeout(bp, req, FLASH_NVRAM_TIMEOUT); + req->dir_type = cpu_to_le16(dir_type); + req->dir_ordinal = cpu_to_le16(dir_ordinal); + req->dir_ext = cpu_to_le16(dir_ext); + req->dir_attr = cpu_to_le16(dir_attr); + req->dir_item_length = cpu_to_le32(dir_item_len); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); return rc; } -static int bnxt_flash_nvram(struct net_device *dev, u16 dir_type, - u16 dir_ordinal, u16 dir_ext, u16 dir_attr, - const u8 *data, size_t data_len) -{ - struct bnxt *bp = netdev_priv(dev); - int rc; - - mutex_lock(&bp->hwrm_cmd_lock); - rc = __bnxt_flash_nvram(dev, dir_type, dir_ordinal, dir_ext, dir_attr, - 0, data, data_len); - mutex_unlock(&bp->hwrm_cmd_lock); - return rc; -} - static int bnxt_hwrm_firmware_reset(struct net_device *dev, u8 proc_type, u8 self_reset, u8 flags) { - struct hwrm_fw_reset_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_fw_reset_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FW_RESET); + if (rc) + return rc; - req.embedded_proc_type = proc_type; - req.selfrst_status = self_reset; - req.flags = flags; + req->embedded_proc_type = proc_type; + req->selfrst_status = self_reset; + req->flags = flags; if (proc_type == FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP) { - rc = hwrm_send_message_silent(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send_silent(bp, req); } else { - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (rc == -EACCES) bnxt_print_admin_err(bp); } @@ -2326,7 +2336,7 @@ static int bnxt_flash_firmware(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); if (rc == 0) /* Firmware update successful */ rc = bnxt_firmware_reset(dev, dir_type); @@ -2379,7 +2389,7 @@ static int bnxt_flash_microcode(struct net_device *dev, return -EINVAL; } rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw_data, fw_size); + 0, 0, 0, fw_data, fw_size); return rc; } @@ -2445,7 +2455,7 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size); else rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST, - 0, 0, fw->data, fw->size); + 0, 0, 0, fw->data, fw->size); release_firmware(fw); return rc; } @@ -2457,21 +2467,23 @@ static int bnxt_flash_firmware_from_file(struct net_device *dev, int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware *fw, u32 install_type) { - struct hwrm_nvm_install_update_input install = {0}; - struct hwrm_nvm_install_update_output resp = {0}; - struct hwrm_nvm_modify_input modify = {0}; + struct hwrm_nvm_install_update_input *install; + struct hwrm_nvm_install_update_output *resp; + struct 
hwrm_nvm_modify_input *modify; struct bnxt *bp = netdev_priv(dev); bool defrag_attempted = false; dma_addr_t dma_handle; u8 *kmem = NULL; u32 modify_len; u32 item_len; - int rc = 0; u16 index; + int rc; bnxt_hwrm_fw_set_time(bp); - bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1); + rc = hwrm_req_init(bp, modify, HWRM_NVM_MODIFY); + if (rc) + return rc; /* Try allocating a large DMA buffer first. Older fw will * cause excessive NVRAM erases when using small blocks. @@ -2479,22 +2491,33 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware modify_len = roundup_pow_of_two(fw->size); modify_len = min_t(u32, modify_len, BNXT_PKG_DMA_SIZE); while (1) { - kmem = dma_alloc_coherent(&bp->pdev->dev, modify_len, - &dma_handle, GFP_KERNEL); + kmem = hwrm_req_dma_slice(bp, modify, modify_len, &dma_handle); if (!kmem && modify_len > PAGE_SIZE) modify_len /= 2; else break; } - if (!kmem) + if (!kmem) { + hwrm_req_drop(bp, modify); return -ENOMEM; + } - modify.host_src_addr = cpu_to_le64(dma_handle); + rc = hwrm_req_init(bp, install, HWRM_NVM_INSTALL_UPDATE); + if (rc) { + hwrm_req_drop(bp, modify); + return rc; + } - bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1); + hwrm_req_timeout(bp, modify, FLASH_PACKAGE_TIMEOUT); + hwrm_req_timeout(bp, install, INSTALL_PACKAGE_TIMEOUT); + + hwrm_req_hold(bp, modify); + modify->host_src_addr = cpu_to_le64(dma_handle); + + resp = hwrm_req_hold(bp, install); if ((install_type & 0xffff) == 0) install_type >>= 16; - install.install_type = cpu_to_le32(install_type); + install->install_type = cpu_to_le32(install_type); do { u32 copied = 0, len = modify_len; @@ -2514,76 +2537,69 @@ int bnxt_flash_package_from_fw_obj(struct net_device *dev, const struct firmware break; } - modify.dir_idx = cpu_to_le16(index); + modify->dir_idx = cpu_to_le16(index); if (fw->size > modify_len) - modify.flags = BNXT_NVM_MORE_FLAG; + modify->flags = BNXT_NVM_MORE_FLAG; while (copied < fw->size) { u32 balance = fw->size - copied; if (balance <= modify_len) { len = balance; if (copied) - modify.flags |= BNXT_NVM_LAST_FLAG; + modify->flags |= BNXT_NVM_LAST_FLAG; } memcpy(kmem, fw->data + copied, len); - modify.len = cpu_to_le32(len); - modify.offset = cpu_to_le32(copied); - rc = hwrm_send_message(bp, &modify, sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + modify->len = cpu_to_le32(len); + modify->offset = cpu_to_le32(copied); + rc = hwrm_req_send(bp, modify); if (rc) goto pkg_abort; copied += len; } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + + rc = hwrm_req_send_silent(bp, install); if (defrag_attempted) { /* We have tried to defragment already in the previous * iteration. 
Return with the result for INSTALL_UPDATE */ - mutex_unlock(&bp->hwrm_cmd_lock); break; } - if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { - install.flags = + install->flags = cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message_silent(bp, &install, - sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - memcpy(&resp, bp->hwrm_cmd_resp_addr, sizeof(resp)); + rc = hwrm_req_send_silent(bp, install); - if (rc && ((struct hwrm_err_output *)&resp)->cmd_err == + if (rc && ((struct hwrm_err_output *)resp)->cmd_err == NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE) { /* FW has cleared NVM area, driver will create * UPDATE directory and try the flash again */ defrag_attempted = true; - install.flags = 0; - rc = __bnxt_flash_nvram(bp->dev, - BNX_DIR_TYPE_UPDATE, - BNX_DIR_ORDINAL_FIRST, - 0, 0, item_len, NULL, - 0); + install->flags = 0; + rc = bnxt_flash_nvram(bp->dev, + BNX_DIR_TYPE_UPDATE, + BNX_DIR_ORDINAL_FIRST, + 0, 0, item_len, NULL, 0); } else if (rc) { netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } } else if (rc) { netdev_err(dev, "HWRM_NVM_INSTALL_UPDATE failure rc :%x\n", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); } while (defrag_attempted && !rc); pkg_abort: - dma_free_coherent(&bp->pdev->dev, modify_len, kmem, dma_handle); - if (resp.result) { + hwrm_req_drop(bp, modify); + hwrm_req_drop(bp, install); + + if (resp->result) { netdev_err(dev, "PKG install error = %d, problem_item = %d\n", - (s8)resp.result, (int)resp.problem_item); + (s8)resp->result, (int)resp->problem_item); rc = -ENOPKG; } if (rc == -EACCES) @@ -2629,20 +2645,22 @@ static int bnxt_flash_device(struct net_device *dev, static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length) { + struct hwrm_nvm_get_dir_info_output *output; + struct hwrm_nvm_get_dir_info_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_get_dir_info_input req = {0}; - struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_INFO); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { *entries = le32_to_cpu(output->entries); *length = le32_to_cpu(output->entry_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2668,7 +2686,7 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) u8 *buf; size_t buflen; dma_addr_t dma_handle; - struct hwrm_nvm_get_dir_entries_input req = {0}; + struct hwrm_nvm_get_dir_entries_input *req; rc = nvm_get_dir_info(dev, &dir_entries, &entry_length); if (rc != 0) @@ -2686,20 +2704,23 @@ static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data) len -= 2; memset(data, 0xff, len); + rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES); + if (rc) + return rc; + buflen = dir_entries * entry_length; - buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle, - GFP_KERNEL); + buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)buflen); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); 
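This hunk also shows the common DMA-buffer handling after the conversion: hwrm_req_dma_slice() replaces dma_alloc_coherent() and returns a host pointer plus the DMA address, and a single hwrm_req_drop() later releases the slice together with the request, so the explicit dma_free_coherent() calls disappear. Condensed from the + lines of the bnxt_get_nvram_directory() conversion (a sketch for readability, not new code):

	rc = hwrm_req_init(bp, req, HWRM_NVM_GET_DIR_ENTRIES);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, buflen, &dma_handle);	/* replaces dma_alloc_coherent() */
	if (!buf) {
		hwrm_req_drop(bp, req);		/* also frees the request from hwrm_req_init() */
		return -ENOMEM;
	}
	req->host_dest_addr = cpu_to_le64(dma_handle);

	hwrm_req_hold(bp, req);			/* hold the slice across the send */
	rc = hwrm_req_send(bp, req);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	hwrm_req_drop(bp, req);			/* frees request, response and slice; no dma_free_coherent() needed */
	return rc;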
- rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, len > buflen ? buflen : len); - dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2710,28 +2731,31 @@ static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset, int rc; u8 *buf; dma_addr_t dma_handle; - struct hwrm_nvm_read_input req = {0}; + struct hwrm_nvm_read_input *req; if (!length) return -EINVAL; - buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle, - GFP_KERNEL); + rc = hwrm_req_init(bp, req, HWRM_NVM_READ); + if (rc) + return rc; + + buf = hwrm_req_dma_slice(bp, req, length, &dma_handle); if (!buf) { - netdev_err(dev, "dma_alloc_coherent failure, length = %u\n", - (unsigned)length); + hwrm_req_drop(bp, req); return -ENOMEM; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1); - req.host_dest_addr = cpu_to_le64(dma_handle); - req.dir_idx = cpu_to_le16(index); - req.offset = cpu_to_le32(offset); - req.len = cpu_to_le32(length); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->host_dest_addr = cpu_to_le64(dma_handle); + req->dir_idx = cpu_to_le16(index); + req->offset = cpu_to_le32(offset); + req->len = cpu_to_le32(length); + + hwrm_req_hold(bp, req); /* hold the slice */ + rc = hwrm_req_send(bp, req); if (rc == 0) memcpy(data, buf, length); - dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle); + hwrm_req_drop(bp, req); return rc; } @@ -2739,20 +2763,23 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, u16 ext, u16 *index, u32 *item_length, u32 *data_length) { + struct hwrm_nvm_find_dir_entry_output *output; + struct hwrm_nvm_find_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); int rc; - struct hwrm_nvm_find_dir_entry_input req = {0}; - struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr; - - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1); - req.enables = 0; - req.dir_idx = 0; - req.dir_type = cpu_to_le16(type); - req.dir_ordinal = cpu_to_le16(ordinal); - req.dir_ext = cpu_to_le16(ext); - req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_NVM_FIND_DIR_ENTRY); + if (rc) + return rc; + + req->enables = 0; + req->dir_idx = 0; + req->dir_type = cpu_to_le16(type); + req->dir_ordinal = cpu_to_le16(ordinal); + req->dir_ext = cpu_to_le16(ext); + req->opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ; + output = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (rc == 0) { if (index) *index = le16_to_cpu(output->dir_idx); @@ -2761,7 +2788,7 @@ static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal, if (data_length) *data_length = le32_to_cpu(output->dir_data_length); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -2856,12 +2883,16 @@ static int bnxt_get_eeprom(struct net_device *dev, static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index) { + struct hwrm_nvm_erase_dir_entry_input *req; struct bnxt *bp = netdev_priv(dev); - struct hwrm_nvm_erase_dir_entry_input req = {0}; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1); - req.dir_idx = cpu_to_le16(index); - return hwrm_send_message(bp, &req, sizeof(req), 
HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_NVM_ERASE_DIR_ENTRY); + if (rc) + return rc; + + req->dir_idx = cpu_to_le16(index); + return hwrm_req_send(bp, req); } static int bnxt_set_eeprom(struct net_device *dev, @@ -2901,7 +2932,7 @@ static int bnxt_set_eeprom(struct net_device *dev, ordinal = eeprom->offset >> 16; attr = eeprom->offset & 0xffff; - return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data, + return bnxt_flash_nvram(dev, type, ordinal, ext, attr, 0, data, eeprom->len); } @@ -2989,31 +3020,33 @@ static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr, u16 page_number, u16 start_addr, u16 data_length, u8 *buf) { - struct hwrm_port_phy_i2c_read_input req = {0}; - struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_i2c_read_output *output; + struct hwrm_port_phy_i2c_read_input *req; int rc, byte_offset = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1); - req.i2c_slave_addr = i2c_addr; - req.page_number = cpu_to_le16(page_number); - req.port_id = cpu_to_le16(bp->pf.port_id); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_I2C_READ); + if (rc) + return rc; + + output = hwrm_req_hold(bp, req); + req->i2c_slave_addr = i2c_addr; + req->page_number = cpu_to_le16(page_number); + req->port_id = cpu_to_le16(bp->pf.port_id); do { u16 xfer_size; xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE); data_length -= xfer_size; - req.page_offset = cpu_to_le16(start_addr + byte_offset); - req.data_length = xfer_size; - req.enables = cpu_to_le32(start_addr + byte_offset ? + req->page_offset = cpu_to_le16(start_addr + byte_offset); + req->data_length = xfer_size; + req->enables = cpu_to_le32(start_addr + byte_offset ? PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); if (!rc) memcpy(buf + byte_offset, output->data, xfer_size); - mutex_unlock(&bp->hwrm_cmd_lock); byte_offset += xfer_size; } while (!rc && data_length > 0); + hwrm_req_drop(bp, req); return rc; } @@ -3122,13 +3155,13 @@ static int bnxt_nway_reset(struct net_device *dev) static int bnxt_set_phys_id(struct net_device *dev, enum ethtool_phys_id_state state) { - struct hwrm_port_led_cfg_input req = {0}; + struct hwrm_port_led_cfg_input *req; struct bnxt *bp = netdev_priv(dev); struct bnxt_pf_info *pf = &bp->pf; struct bnxt_led_cfg *led_cfg; u8 led_state; __le16 duration; - int i; + int rc, i; if (!bp->num_leds || BNXT_VF(bp)) return -EOPNOTSUPP; @@ -3142,27 +3175,35 @@ static int bnxt_set_phys_id(struct net_device *dev, } else { return -EINVAL; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1); - req.port_id = cpu_to_le16(pf->port_id); - req.num_leds = bp->num_leds; - led_cfg = (struct bnxt_led_cfg *)&req.led0_id; + rc = hwrm_req_init(bp, req, HWRM_PORT_LED_CFG); + if (rc) + return rc; + + req->port_id = cpu_to_le16(pf->port_id); + req->num_leds = bp->num_leds; + led_cfg = (struct bnxt_led_cfg *)&req->led0_id; for (i = 0; i < bp->num_leds; i++, led_cfg++) { - req.enables |= BNXT_LED_DFLT_ENABLES(i); + req->enables |= BNXT_LED_DFLT_ENABLES(i); led_cfg->led_id = bp->leds[i].led_id; led_cfg->led_state = led_state; led_cfg->led_blink_on = duration; led_cfg->led_blink_off = duration; led_cfg->led_group_id = bp->leds[i].led_group_id; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 
cmpl_ring) { - struct hwrm_selftest_irq_input req = {0}; + struct hwrm_selftest_irq_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_IRQ); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->cmpl_ring = cpu_to_le16(cmpl_ring); + return hwrm_req_send(bp, req); } static int bnxt_test_irq(struct bnxt *bp) @@ -3182,31 +3223,37 @@ static int bnxt_test_irq(struct bnxt *bp) static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) { - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK); if (enable) - req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL; else - req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->lpbk = PORT_MAC_CFG_REQ_LPBK_NONE; + return hwrm_req_send(bp, req); } static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) { - struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_phy_qcaps_input req = {0}; + struct hwrm_port_phy_qcaps_output *resp; + struct hwrm_port_phy_qcaps_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_QCAPS); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -3241,7 +3288,7 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, req->force_link_speed = cpu_to_le16(fw_speed); req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE | PORT_PHY_CFG_REQ_FLAGS_RESET_PHY); - rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); req->flags = 0; req->force_link_speed = cpu_to_le16(0); return rc; @@ -3249,21 +3296,29 @@ static int bnxt_disable_an_for_lpbk(struct bnxt *bp, static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext) { - struct hwrm_port_phy_cfg_input req = {0}; + struct hwrm_port_phy_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_PHY_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1); + /* prevent bnxt_disable_an_for_lpbk() from consuming the request */ + hwrm_req_hold(bp, req); if (enable) { - bnxt_disable_an_for_lpbk(bp, &req); + bnxt_disable_an_for_lpbk(bp, req); if (ext) - req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL; else - req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL; } else { - req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; + req->lpbk = PORT_PHY_CFG_REQ_LPBK_NONE; } - req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK); + rc = hwrm_req_send(bp, req); + hwrm_req_drop(bp, req); + return rc; } static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, @@ -3361,7 +3416,7 @@ static int 
bnxt_run_loopback(struct bnxt *bp) data[i] = (u8)(i & 0xff); map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(&bp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; @@ -3374,24 +3429,28 @@ static int bnxt_run_loopback(struct bnxt *bp) bnxt_db_write(bp, &txr->tx_db, txr->tx_prod); rc = bnxt_poll_loopback(bp, cpr, pkt_size); - dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE); + dma_unmap_single(&bp->pdev->dev, map, pkt_size, DMA_TO_DEVICE); dev_kfree_skb(skb); return rc; } static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results) { - struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_exec_input req = {0}; + struct hwrm_selftest_exec_output *resp; + struct hwrm_selftest_exec_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - resp->test_success = 0; - req.flags = test_mask; - rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout); + rc = hwrm_req_init(bp, req, HWRM_SELFTEST_EXEC); + if (rc) + return rc; + + hwrm_req_timeout(bp, req, bp->test_info->timeout); + req->flags = test_mask; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); *test_results = resp->test_success; - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -3550,32 +3609,34 @@ static int bnxt_reset(struct net_device *dev, u32 *flags) return 0; } -static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, +static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, struct bnxt_hwrm_dbg_dma_info *info) { - struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr; struct hwrm_dbg_cmn_input *cmn_req = msg; __le16 *seq_ptr = msg + info->seq_off; + struct hwrm_dbg_cmn_output *cmn_resp; u16 seq = 0, len, segs_off; - void *resp = cmn_resp; dma_addr_t dma_handle; + void *dma_buf, *resp; int rc, off = 0; - void *dma_buf; - dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle, - GFP_KERNEL); - if (!dma_buf) + dma_buf = hwrm_req_dma_slice(bp, msg, info->dma_len, &dma_handle); + if (!dma_buf) { + hwrm_req_drop(bp, msg); return -ENOMEM; + } + + hwrm_req_timeout(bp, msg, HWRM_COREDUMP_TIMEOUT); + cmn_resp = hwrm_req_hold(bp, msg); + resp = cmn_resp; segs_off = offsetof(struct hwrm_dbg_coredump_list_output, total_segments); cmn_req->host_dest_addr = cpu_to_le64(dma_handle); cmn_req->host_buf_len = cpu_to_le32(info->dma_len); - mutex_lock(&bp->hwrm_cmd_lock); while (1) { *seq_ptr = cpu_to_le16(seq); - rc = _hwrm_send_message(bp, msg, msg_len, - HWRM_COREDUMP_TIMEOUT); + rc = hwrm_req_send(bp, msg); if (rc) break; @@ -3619,26 +3680,27 @@ static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len, seq++; off += len; } - mutex_unlock(&bp->hwrm_cmd_lock); - dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle); + hwrm_req_drop(bp, msg); return rc; } static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, struct bnxt_coredump *coredump) { - struct hwrm_dbg_coredump_list_input req = {0}; struct bnxt_hwrm_dbg_dma_info info = {NULL}; + struct hwrm_dbg_coredump_list_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST); + if (rc) + return rc; info.dma_len = COREDUMP_LIST_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no); info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output, data_len); - rc = 
bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) { coredump->data = info.dest_buf; coredump->data_size = info.dest_buf_size; @@ -3650,26 +3712,34 @@ static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp, static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id, u16 segment_id) { - struct hwrm_dbg_coredump_initiate_input req = {0}; + struct hwrm_dbg_coredump_initiate_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_INITIATE); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + hwrm_req_timeout(bp, req, HWRM_COREDUMP_TIMEOUT); + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT); + return hwrm_req_send(bp, req); } static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, u16 segment_id, u32 *seg_len, void *buf, u32 buf_len, u32 offset) { - struct hwrm_dbg_coredump_retrieve_input req = {0}; + struct hwrm_dbg_coredump_retrieve_input *req; struct bnxt_hwrm_dbg_dma_info info = {NULL}; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1); - req.component_id = cpu_to_le16(component_id); - req.segment_id = cpu_to_le16(segment_id); + rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_RETRIEVE); + if (rc) + return rc; + + req->component_id = cpu_to_le16(component_id); + req->segment_id = cpu_to_le16(segment_id); info.dma_len = COREDUMP_RETRIEVE_BUF_LEN; info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input, @@ -3682,7 +3752,7 @@ static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id, info.seg_start = offset; } - rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info); + rc = bnxt_hwrm_dbg_dma_data(bp, req, &info); if (!rc) *seg_len = info.dest_buf_size; @@ -3961,8 +4031,8 @@ static int bnxt_get_ts_info(struct net_device *dev, void bnxt_ethtool_init(struct bnxt *bp) { - struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_selftest_qlist_input req = {0}; + struct hwrm_selftest_qlist_output *resp; + struct hwrm_selftest_qlist_input *req; struct bnxt_test_info *test_info; struct net_device *dev = bp->dev; int i, rc; @@ -3974,19 +4044,22 @@ void bnxt_ethtool_init(struct bnxt *bp) if (bp->hwrm_spec_code < 0x10704 || !BNXT_PF(bp)) return; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) - goto ethtool_init_exit; - test_info = bp->test_info; - if (!test_info) + if (!test_info) { test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL); - if (!test_info) + if (!test_info) + return; + bp->test_info = test_info; + } + + if (hwrm_req_init(bp, req, HWRM_SELFTEST_QLIST)) + return; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (rc) goto ethtool_init_exit; - bp->test_info = test_info; bp->num_tests = resp->num_tests + BNXT_DRV_TESTS; if (bp->num_tests > BNXT_MAX_TEST) bp->num_tests = BNXT_MAX_TEST; @@ -4020,7 +4093,7 @@ void bnxt_ethtool_init(struct bnxt *bp) } ethtool_init_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); } static void bnxt_get_eth_phy_stats(struct net_device *dev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c new 
file mode 100644 index 000000000000..acef61abe35d --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -0,0 +1,763 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#include <asm/byteorder.h> +#include <linux/dma-mapping.h> +#include <linux/dmapool.h> +#include <linux/errno.h> +#include <linux/ethtool.h> +#include <linux/if_ether.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/kernel.h> +#include <linux/list.h> +#include <linux/netdevice.h> +#include <linux/pci.h> +#include <linux/skbuff.h> + +#include "bnxt_hsi.h" +#include "bnxt.h" +#include "bnxt_hwrm.h" + +static u64 hwrm_calc_sentinel(struct bnxt_hwrm_ctx *ctx, u16 req_type) +{ + return (((uintptr_t)ctx) + req_type) ^ BNXT_HWRM_SENTINEL; +} + +/** + * __hwrm_req_init() - Initialize an HWRM request. + * @bp: The driver context. + * @req: A pointer to the request pointer to initialize. + * @req_type: The request type. This will be converted to little endian + * before being written to the req_type field of the returned request. + * @req_len: The length of the request to be allocated. + * + * Allocate DMA resources and initialize a new HWRM request object of the + * given type. The response address field in the request is configured with + * the DMA bus address that has been mapped for the response and the passed + * request is pointed to kernel virtual memory mapped for the request (such + * that short_input indirection can be accomplished without copying). The + * request's target and completion ring are initialized to default values and + * can be overridden by writing to the returned request object directly. + * + * The initialized request can be further customized by writing to its fields + * directly, taking care to convert such fields to little endian. The request + * object will be consumed (and all its associated resources released) upon + * passing it to hwrm_req_send() unless ownership of the request has been + * claimed by the caller via a call to hwrm_req_hold(). If the request is not + * consumed, either because it is never sent or because ownership has been + * claimed, then it must be released by a call to hwrm_req_drop(). + * + * Return: zero on success, negative error code otherwise: + * E2BIG: the type of request pointer is too large to fit. + * ENOMEM: an allocation failure occurred.
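The calling convention described in this comment reduces to a short pattern at call sites. The sketch below is illustrative only and is not part of the patch; it mirrors the converted call sites later in this series, with HWRM_PORT_MAC_CFG chosen as an arbitrary example request type and example_simple_send() invented purely for illustration:

static int example_simple_send(struct bnxt *bp, u32 ppb)
{
	struct hwrm_port_mac_cfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG);
	if (rc)
		return rc;

	/* request fields are written in little endian, as required above */
	req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB);
	req->ptp_freq_adj_ppb = cpu_to_le32(ppb);

	/* the unowned request and its DMA buffer are consumed by the send */
	return hwrm_req_send(bp, req);
}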
+ */ +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len) +{ + struct bnxt_hwrm_ctx *ctx; + dma_addr_t dma_handle; + u8 *req_addr; + + if (req_len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + req_addr = dma_pool_alloc(bp->hwrm_dma_pool, GFP_KERNEL | __GFP_ZERO, + &dma_handle); + if (!req_addr) + return -ENOMEM; + + ctx = (struct bnxt_hwrm_ctx *)(req_addr + BNXT_HWRM_CTX_OFFSET); + /* safety first, sentinel used to check for invalid requests */ + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + ctx->req_len = req_len; + ctx->req = (struct input *)req_addr; + ctx->resp = (struct output *)(req_addr + BNXT_HWRM_RESP_OFFSET); + ctx->dma_handle = dma_handle; + ctx->flags = 0; /* __GFP_ZERO, but be explicit regarding ownership */ + ctx->timeout = bp->hwrm_cmd_timeout ?: DFLT_HWRM_CMD_TIMEOUT; + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + ctx->gfp = GFP_KERNEL; + ctx->slice_addr = NULL; + + /* initialize common request fields */ + ctx->req->req_type = cpu_to_le16(req_type); + ctx->req->resp_addr = cpu_to_le64(dma_handle + BNXT_HWRM_RESP_OFFSET); + ctx->req->cmpl_ring = cpu_to_le16(BNXT_HWRM_NO_CMPL_RING); + ctx->req->target_id = cpu_to_le16(BNXT_HWRM_TARGET); + *req = ctx->req; + + return 0; +} + +static struct bnxt_hwrm_ctx *__hwrm_ctx(struct bnxt *bp, u8 *req_addr) +{ + void *ctx_addr = req_addr + BNXT_HWRM_CTX_OFFSET; + struct input *req = (struct input *)req_addr; + struct bnxt_hwrm_ctx *ctx = ctx_addr; + u64 sentinel; + + if (!req) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "null HWRM request"); + dump_stack(); + return NULL; + } + + /* HWRM API has no type safety, verify sentinel to validate address */ + sentinel = hwrm_calc_sentinel(ctx, le16_to_cpu(req->req_type)); + if (ctx->sentinel != sentinel) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM sentinel mismatch, req_type = %u\n", + (u32)le16_to_cpu(req->req_type)); + dump_stack(); + return NULL; + } + + return ctx; +} + +/** + * hwrm_req_timeout() - Set the completion timeout for the request. + * @bp: The driver context. + * @req: The request to set the timeout. + * @timeout: The timeout in milliseconds. + * + * Set the timeout associated with the request for subsequent calls to + * hwrm_req_send(). Some requests are long running and require a different + * timeout than the default. + */ +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->timeout = timeout; +} + +/** + * hwrm_req_alloc_flags() - Sets GFP allocation flags for slices. + * @bp: The driver context. + * @req: The request for which calls to hwrm_req_dma_slice() will have altered + * allocation flags. + * @flags: A bitmask of GFP flags. These flags are passed to + * dma_alloc_coherent() whenever it is used to allocate backing memory + * for slices. Note that calls to hwrm_req_dma_slice() will not always + * result in new allocations, however, memory suballocated from the + * request buffer is already __GFP_ZERO. + * + * Sets the GFP allocation flags associated with the request for subsequent + * calls to hwrm_req_dma_slice(). This can be useful for specifying __GFP_ZERO + * for slice allocations. + */ +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t gfp) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->gfp = gfp; +} + +/** + * hwrm_req_replace() - Replace request data. + * @bp: The driver context. + * @req: The request to modify. 
A call to hwrm_req_replace() is conceptually + * an assignment of new_req to req. Subsequent calls to HWRM API functions, + * such as hwrm_req_send(), should thus use req and not new_req (in fact, + * calls to HWRM API functions will fail if non-managed request objects + * are passed). + * @new_req: The pre-built request to copy or reference. + * @len: The length of new_req. + * + * Replaces the request data in req with that of new_req. This is useful in + * scenarios where a request object has already been constructed by a third + * party prior to creating a resource managed request using hwrm_req_init(). + * Depending on the length, hwrm_req_replace() will either copy the new + * request data into the DMA memory allocated for req, or it will simply + * reference the new request and use it in lieu of req during subsequent + * calls to hwrm_req_send(). The resource management is associated with + * req and is independent of and does not apply to new_req. The caller must + * ensure that the lifetime of new_req is at least as long as that of req. Any + * slices that may have been associated with the original request are released. + * + * Return: zero on success, negative error code otherwise: + * E2BIG: Request is too large. + * EINVAL: Invalid request to modify. + */ +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *internal_req = req; + u16 req_type; + + if (!ctx) + return -EINVAL; + + if (len > BNXT_HWRM_CTX_OFFSET) + return -E2BIG; + + /* free any existing slices */ + ctx->allocated = BNXT_HWRM_DMA_SIZE - BNXT_HWRM_CTX_OFFSET; + if (ctx->slice_addr) { + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + ctx->slice_addr = NULL; + } + ctx->gfp = GFP_KERNEL; + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || len > BNXT_HWRM_MAX_REQ_LEN) { + memcpy(internal_req, new_req, len); + } else { + internal_req->req_type = ((struct input *)new_req)->req_type; + ctx->req = new_req; + } + + ctx->req_len = len; + ctx->req->resp_addr = cpu_to_le64(ctx->dma_handle + + BNXT_HWRM_RESP_OFFSET); + + /* update sentinel for potentially new request type */ + req_type = le16_to_cpu(internal_req->req_type); + ctx->sentinel = hwrm_calc_sentinel(ctx, req_type); + + return 0; +} + +/** + * hwrm_req_flags() - Set non-internal flags of the ctx + * @bp: The driver context. + * @req: The request containing the HWRM command + * @flags: ctx flags that don't have BNXT_HWRM_INTERNAL_FLAG set + * + * ctx flags can be used by the callers to instruct how the subsequent + * hwrm_req_send() should behave. Example: callers can use hwrm_req_flags + * with BNXT_HWRM_CTX_SILENT to omit kernel prints of errors of hwrm_req_send(), + * or with BNXT_HWRM_FULL_WAIT to force hwrm_req_send() to wait for the full + * timeout even if FW is not responding. + * This generic function can be used to set any flag that is not an internal + * flag of the HWRM module. + */ +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + ctx->flags |= (flags & HWRM_API_FLAGS); +} + +/** + * hwrm_req_hold() - Claim ownership of the request's resources. + * @bp: The driver context. + * @req: A pointer to the request to own. The request will no longer be + * consumed by calls to hwrm_req_send(). + * + * Take ownership of the request.
Ownership places responsibility on the + * caller to free the resources associated with the request via a call to + * hwrm_req_drop(). The caller taking ownership implies that a subsequent + * call to hwrm_req_send() will not consume the request (ie. sending will + * not free the associated resources if the request is owned by the caller). + * Taking ownership returns a reference to the response. Retaining and + * accessing the response data is the most common reason to take ownership + * of the request. Ownership can also be acquired in order to reuse the same + * request object across multiple invocations of hwrm_req_send(). + * + * Return: A pointer to the response object. + * + * The resources associated with the response will remain available to the + * caller until ownership of the request is relinquished via a call to + * hwrm_req_drop(). It is not possible for hwrm_req_hold() to return NULL if + * a valid request is provided. A returned NULL value would imply a driver + * bug and the implementation will complain loudly in the logs to aid in + * detection. It should not be necessary to check the result for NULL. + */ +void *hwrm_req_hold(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + struct input *input = (struct input *)req; + + if (!ctx) + return NULL; + + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) { + /* can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM context already owned, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + ctx->flags |= BNXT_HWRM_INTERNAL_CTX_OWNED; + return ((u8 *)req) + BNXT_HWRM_RESP_OFFSET; +} + +static void __hwrm_ctx_drop(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + void *addr = ((u8 *)ctx) - BNXT_HWRM_CTX_OFFSET; + dma_addr_t dma_handle = ctx->dma_handle; /* save before invalidate */ + + /* unmap any auxiliary DMA slice */ + if (ctx->slice_addr) + dma_free_coherent(&bp->pdev->dev, ctx->slice_size, + ctx->slice_addr, ctx->slice_handle); + + /* invalidate, ensure ownership, sentinel and dma_handle are cleared */ + memset(ctx, 0, sizeof(struct bnxt_hwrm_ctx)); + + /* return the buffer to the DMA pool */ + if (dma_handle) + dma_pool_free(bp->hwrm_dma_pool, addr, dma_handle); +} + +/** + * hwrm_req_drop() - Release all resources associated with the request. + * @bp: The driver context. + * @req: The request to consume, releasing the associated resources. The + * request object, any slices, and its associated response are no + * longer valid. + * + * It is legal to call hwrm_req_drop() on an unowned request, provided it + * has not already been consumed by hwrm_req_send() (for example, to release + * an aborted request). A given request should not be dropped more than once, + * nor should it be dropped after having been consumed by hwrm_req_send(). To + * do so is an error (the context will not be found and a stack trace will be + * rendered in the kernel log). 
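The ownership rules spelled out here and in the hwrm_req_hold() comment above are easiest to see in the query-style conversions elsewhere in this patch. A minimal illustrative sketch, not part of the patch, with HWRM_FUNC_QCFG as an arbitrary example request and example_query() invented for illustration:

static int example_query(struct bnxt *bp, u16 fid, u16 *flags)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);
	resp = hwrm_req_hold(bp, req);	/* take ownership, get response pointer */
	rc = hwrm_req_send(bp, req);	/* does not consume an owned request */
	if (!rc)
		*flags = le16_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);		/* the owner releases req and resp */
	return rc;
}

Because the response lives in DMA memory tied to the request, reading resp after hwrm_req_send() is only safe while the request is held; the final hwrm_req_drop() releases both.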
+ */ +void hwrm_req_drop(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (ctx) + __hwrm_ctx_drop(bp, ctx); +} + +static int __hwrm_to_stderr(u32 hwrm_err) +{ + switch (hwrm_err) { + case HWRM_ERR_CODE_SUCCESS: + return 0; + case HWRM_ERR_CODE_RESOURCE_LOCKED: + return -EROFS; + case HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED: + return -EACCES; + case HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR: + return -ENOSPC; + case HWRM_ERR_CODE_INVALID_PARAMS: + case HWRM_ERR_CODE_INVALID_FLAGS: + case HWRM_ERR_CODE_INVALID_ENABLES: + case HWRM_ERR_CODE_UNSUPPORTED_TLV: + case HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR: + return -EINVAL; + case HWRM_ERR_CODE_NO_BUFFER: + return -ENOMEM; + case HWRM_ERR_CODE_HOT_RESET_PROGRESS: + case HWRM_ERR_CODE_BUSY: + return -EAGAIN; + case HWRM_ERR_CODE_CMD_NOT_SUPPORTED: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +static struct bnxt_hwrm_wait_token * +__hwrm_acquire_token(struct bnxt *bp, enum bnxt_hwrm_chnl dst) +{ + struct bnxt_hwrm_wait_token *token; + + token = kzalloc(sizeof(*token), GFP_KERNEL); + if (!token) + return NULL; + + mutex_lock(&bp->hwrm_cmd_lock); + + token->dst = dst; + token->state = BNXT_HWRM_PENDING; + if (dst == BNXT_HWRM_CHNL_CHIMP) { + token->seq_id = bp->hwrm_cmd_seq++; + hlist_add_head_rcu(&token->node, &bp->hwrm_pending_list); + } else { + token->seq_id = bp->hwrm_cmd_kong_seq++; + } + + return token; +} + +static void +__hwrm_release_token(struct bnxt *bp, struct bnxt_hwrm_wait_token *token) +{ + if (token->dst == BNXT_HWRM_CHNL_CHIMP) { + hlist_del_rcu(&token->node); + kfree_rcu(token, rcu); + } else { + kfree(token); + } + mutex_unlock(&bp->hwrm_cmd_lock); +} + +void +hwrm_update_token(struct bnxt *bp, u16 seq_id, enum bnxt_hwrm_wait_state state) +{ + struct bnxt_hwrm_wait_token *token; + + rcu_read_lock(); + hlist_for_each_entry_rcu(token, &bp->hwrm_pending_list, node) { + if (token->seq_id == seq_id) { + WRITE_ONCE(token->state, state); + rcu_read_unlock(); + return; + } + } + rcu_read_unlock(); + netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); +} + +static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) +{ + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + enum bnxt_hwrm_chnl dst = BNXT_HWRM_CHNL_CHIMP; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + struct bnxt_hwrm_wait_token *token = NULL; + struct hwrm_short_input short_input = {0}; + u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; + unsigned int i, timeout, tmo_count; + u32 *data = (u32 *)ctx->req; + u32 msg_len = ctx->req_len; + int rc = -EBUSY; + u32 req_type; + u16 len = 0; + u8 *valid; + + if (ctx->flags & BNXT_HWRM_INTERNAL_RESP_DIRTY) + memset(ctx->resp, 0, PAGE_SIZE); + + req_type = le16_to_cpu(ctx->req->req_type); + if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) + goto exit; + + if (msg_len > BNXT_HWRM_MAX_REQ_LEN && + msg_len > bp->hwrm_max_ext_req_len) { + rc = -E2BIG; + goto exit; + } + + if (bnxt_kong_hwrm_message(bp, ctx->req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + netdev_err(bp->dev, "Ring completions not supported for KONG commands, req_type = %d\n", + req_type); + rc = -EINVAL; + goto exit; + } + } + + token = __hwrm_acquire_token(bp, dst); + if (!token) { + rc = -ENOMEM; + goto exit; + } + ctx->req->seq_id = cpu_to_le16(token->seq_id); + + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || + msg_len > BNXT_HWRM_MAX_REQ_LEN) { + 
short_input.req_type = ctx->req->req_type; + short_input.signature = + cpu_to_le16(SHORT_REQ_SIGNATURE_SHORT_CMD); + short_input.size = cpu_to_le16(msg_len); + short_input.req_addr = cpu_to_le64(ctx->dma_handle); + + data = (u32 *)&short_input; + msg_len = sizeof(short_input); + + max_req_len = BNXT_HWRM_SHORT_REQ_LEN; + } + + /* Ensure any associated DMA buffers are written before doorbell */ + wmb(); + + /* Write request msg to hwrm channel */ + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); + + for (i = msg_len; i < max_req_len; i += 4) + writel(0, bp->bar0 + bar_offset + i); + + /* Ring channel doorbell */ + writel(1, bp->bar0 + doorbell_offset); + + if (!pci_is_enabled(bp->pdev)) { + rc = -ENODEV; + goto exit; + } + + /* Limit timeout to an upper limit */ + timeout = min_t(uint, ctx->timeout, HWRM_CMD_MAX_TIMEOUT); + /* convert timeout to usec */ + timeout *= 1000; + + i = 0; + /* Short timeout for the first few iterations: + * number of loops = number of loops for short timeout + + * number of loops for standard timeout. + */ + tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; + timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; + tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); + + if (le16_to_cpu(ctx->req->cmpl_ring) != INVALID_HW_RING_ID) { + /* Wait until hwrm response cmpl interrupt is processed */ + while (READ_ONCE(token->state) < BNXT_HWRM_COMPLETE && + i++ < tmo_count) { + /* Abort the wait for completion if the FW health + * check has failed. + */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (HWRM_WAIT_MUST_ABORT(bp, ctx)) + break; + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (READ_ONCE(token->state) != BNXT_HWRM_COMPLETE) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", + le16_to_cpu(ctx->req->req_type)); + goto exit; + } + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + valid = ((u8 *)ctx->resp) + len - 1; + } else { + __le16 seen_out_of_seq = ctx->req->seq_id; /* will never see */ + int j; + + /* Check if response len is updated */ + for (i = 0; i < tmo_count; i++) { + /* Abort the wait for completion if the FW health + * check has failed. 
+ */ + if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + goto exit; + + if (token && + READ_ONCE(token->state) == BNXT_HWRM_DEFERRED) { + __hwrm_release_token(bp, token); + token = NULL; + } + + len = le16_to_cpu(READ_ONCE(ctx->resp->resp_len)); + if (len) { + __le16 resp_seq = READ_ONCE(ctx->resp->seq_id); + + if (resp_seq == ctx->req->seq_id) + break; + if (resp_seq != seen_out_of_seq) { + netdev_warn(bp->dev, "Discarding out of seq response: 0x%x for msg {0x%x 0x%x}\n", + le16_to_cpu(resp_seq), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id)); + seen_out_of_seq = resp_seq; + } + } + + /* on first few passes, just barely sleep */ + if (i < HWRM_SHORT_TIMEOUT_COUNTER) { + usleep_range(HWRM_SHORT_MIN_TIMEOUT, + HWRM_SHORT_MAX_TIMEOUT); + } else { + if (HWRM_WAIT_MUST_ABORT(bp, ctx)) + goto timeout_abort; + usleep_range(HWRM_MIN_TIMEOUT, + HWRM_MAX_TIMEOUT); + } + } + + if (i >= tmo_count) { +timeout_abort: + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d\n", + hwrm_total_timeout(i), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len); + goto exit; + } + + /* Last byte of resp contains valid bit */ + valid = ((u8 *)ctx->resp) + len - 1; + for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { + /* make sure we read from updated DMA memory */ + dma_rmb(); + if (*valid) + break; + usleep_range(1, 5); + } + + if (j >= HWRM_VALID_BIT_DELAY_USEC) { + if (!(ctx->flags & BNXT_HWRM_CTX_SILENT)) + netdev_err(bp->dev, "Error (timeout: %u) msg {0x%x 0x%x} len:%d v:%d\n", + hwrm_total_timeout(i), + le16_to_cpu(ctx->req->req_type), + le16_to_cpu(ctx->req->seq_id), len, + *valid); + goto exit; + } + } + + /* Zero valid bit for compatibility. Valid bit in an older spec + * may become a new field in a newer spec. We must make sure that + * a new field not implemented by old spec will read zero. + */ + *valid = 0; + rc = le16_to_cpu(ctx->resp->error_code); + if (rc && !(ctx->flags & BNXT_HWRM_CTX_SILENT)) { + netdev_err(bp->dev, "hwrm req_type 0x%x seq id 0x%x error 0x%x\n", + le16_to_cpu(ctx->resp->req_type), + le16_to_cpu(ctx->resp->seq_id), rc); + } + rc = __hwrm_to_stderr(rc); +exit: + if (token) + __hwrm_release_token(bp, token); + if (ctx->flags & BNXT_HWRM_INTERNAL_CTX_OWNED) + ctx->flags |= BNXT_HWRM_INTERNAL_RESP_DIRTY; + else + __hwrm_ctx_drop(bp, ctx); + return rc; +} + +/** + * hwrm_req_send() - Execute an HWRM command. + * @bp: The driver context. + * @req: A pointer to the request to send. The DMA resources associated with + * the request will be released (ie. the request will be consumed) unless + * ownership of the request has been assumed by the caller via a call to + * hwrm_req_hold(). + * + * Send an HWRM request to the device and wait for a response. The request is + * consumed if it is not owned by the caller. This function will block until + * the request has either completed or times out due to an error. + * + * Return: A result code. + * + * The result is zero on success, otherwise the negative error code indicates + * one of the following errors: + * E2BIG: The request was too large. + * EBUSY: The firmware is in a fatal state or the request timed out + * EACCESS: HWRM access denied. + * ENOSPC: HWRM resource allocation error. + * EINVAL: Request parameters are invalid. + * ENOMEM: HWRM has no buffers. + * EAGAIN: HWRM busy or reset in progress. + * EOPNOTSUPP: Invalid request type. + * EIO: Any other error. + * Error handling is orthogonal to request ownership. 
An unowned request will + still be consumed on error. If the caller owns the request, then the caller + is responsible for releasing the resources. Otherwise, hwrm_req_send() will + always consume the request. + */ +int hwrm_req_send(struct bnxt *bp, void *req) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + + if (!ctx) + return -EINVAL; + + return __hwrm_send(bp, ctx); +} + +/** + * hwrm_req_send_silent() - A silent version of hwrm_req_send(). + * @bp: The driver context. + * @req: The request to send without logging. + * + * The same as hwrm_req_send(), except that the request is silenced by + * setting BNXT_HWRM_CTX_SILENT via hwrm_req_flags() prior to the call. This + * version of the function is provided solely to preserve the legacy API's + * flavor for this functionality. + * + * Return: A result code, see hwrm_req_send(). + */ +int hwrm_req_send_silent(struct bnxt *bp, void *req) +{ + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + return hwrm_req_send(bp, req); +} + +/** + * hwrm_req_dma_slice() - Allocate a slice of DMA mapped memory. + * @bp: The driver context. + * @req: The request for which indirect data will be associated. + * @size: The size of the allocation. + * @dma: The bus address associated with the allocation. The HWRM API has no + * knowledge about the type of the request and so cannot infer how the + * caller intends to use the indirect data. Thus, the caller is + * responsible for configuring the request object appropriately to + * point to the associated indirect memory. Note that the DMA handle has + * the same definition as in dma_alloc_coherent(); the caller is + * responsible for endian conversions via cpu_to_le64() before assigning + * this address. + * + * Allocates DMA mapped memory for indirect data related to a request. The + * lifetime of the DMA resources will be bound to that of the request (i.e. + * they will be automatically released when the request is either consumed by + * hwrm_req_send() or dropped by hwrm_req_drop()). Small allocations are + * efficiently suballocated out of the request buffer space, hence the name + * slice, while larger requests are satisfied via an underlying call to + * dma_alloc_coherent(). Multiple suballocations are supported; however, only + * one externally mapped region is. + * + * Return: The kernel virtual address of the DMA mapping.
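The slice mechanism documented here is what the coredump conversion earlier in this patch relies on. The sketch below is illustrative only and not part of the patch; the request type, the hwrm_dbg_cmn_input overlay and its field names are borrowed from that conversion, while example_dma_slice() and its parameters are invented for illustration:

static int example_dma_slice(struct bnxt *bp, void *dest, u32 buf_len)
{
	struct hwrm_dbg_coredump_list_input *req;
	struct hwrm_dbg_cmn_input *cmn_req;
	dma_addr_t dma_handle;
	void *buf;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_DBG_COREDUMP_LIST);
	if (rc)
		return rc;

	buf = hwrm_req_dma_slice(bp, req, buf_len, &dma_handle);
	if (!buf) {
		hwrm_req_drop(bp, req);
		return -ENOMEM;
	}

	/* wire the slice into the request, converting to little endian */
	cmn_req = (struct hwrm_dbg_cmn_input *)req;
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(buf_len);

	hwrm_req_hold(bp, req);			/* keep req and slice after send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		memcpy(dest, buf, buf_len);	/* data written by firmware */
	hwrm_req_drop(bp, req);			/* also frees the slice */
	return rc;
}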
+ */ +void * +hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma_handle) +{ + struct bnxt_hwrm_ctx *ctx = __hwrm_ctx(bp, req); + u8 *end = ((u8 *)req) + BNXT_HWRM_DMA_SIZE; + struct input *input = req; + u8 *addr, *req_addr = req; + u32 max_offset, offset; + + if (!ctx) + return NULL; + + max_offset = BNXT_HWRM_DMA_SIZE - ctx->allocated; + offset = max_offset - size; + offset = ALIGN_DOWN(offset, BNXT_HWRM_DMA_ALIGN); + addr = req_addr + offset; + + if (addr < req_addr + max_offset && req_addr + ctx->req_len <= addr) { + ctx->allocated = end - addr; + *dma_handle = ctx->dma_handle + offset; + return addr; + } + + /* could not suballocate from ctx buffer, try create a new mapping */ + if (ctx->slice_addr) { + /* if one exists, can only be due to software bug, be loud */ + netdev_err(bp->dev, "HWRM refusing to reallocate DMA slice, req_type = %u\n", + (u32)le16_to_cpu(input->req_type)); + dump_stack(); + return NULL; + } + + addr = dma_alloc_coherent(&bp->pdev->dev, size, dma_handle, ctx->gfp); + + if (!addr) + return NULL; + + ctx->slice_addr = addr; + ctx->slice_size = size; + ctx->slice_handle = *dma_handle; + + return addr; +} diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h new file mode 100644 index 000000000000..4d17f0d5363b --- /dev/null +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.h @@ -0,0 +1,145 @@ +/* Broadcom NetXtreme-C/E network driver. + * + * Copyright (c) 2020 Broadcom Limited + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation. + */ + +#ifndef BNXT_HWRM_H +#define BNXT_HWRM_H + +#include "bnxt_hsi.h" + +enum bnxt_hwrm_ctx_flags { + /* Update the HWRM_API_FLAGS right below for any new non-internal bit added here */ + BNXT_HWRM_INTERNAL_CTX_OWNED = BIT(0), /* caller owns the context */ + BNXT_HWRM_INTERNAL_RESP_DIRTY = BIT(1), /* response contains data */ + BNXT_HWRM_CTX_SILENT = BIT(2), /* squelch firmware errors */ + BNXT_HWRM_FULL_WAIT = BIT(3), /* wait for full timeout of HWRM command */ +}; + +#define HWRM_API_FLAGS (BNXT_HWRM_CTX_SILENT | BNXT_HWRM_FULL_WAIT) + +struct bnxt_hwrm_ctx { + u64 sentinel; + dma_addr_t dma_handle; + struct output *resp; + struct input *req; + dma_addr_t slice_handle; + void *slice_addr; + u32 slice_size; + u32 req_len; + enum bnxt_hwrm_ctx_flags flags; + unsigned int timeout; + u32 allocated; + gfp_t gfp; +}; + +enum bnxt_hwrm_wait_state { + BNXT_HWRM_PENDING, + BNXT_HWRM_DEFERRED, + BNXT_HWRM_COMPLETE, + BNXT_HWRM_CANCELLED, +}; + +enum bnxt_hwrm_chnl { BNXT_HWRM_CHNL_CHIMP, BNXT_HWRM_CHNL_KONG }; + +struct bnxt_hwrm_wait_token { + struct rcu_head rcu; + struct hlist_node node; + enum bnxt_hwrm_wait_state state; + enum bnxt_hwrm_chnl dst; + u16 seq_id; +}; + +void hwrm_update_token(struct bnxt *bp, u16 seq, enum bnxt_hwrm_wait_state s); + +#define BNXT_HWRM_MAX_REQ_LEN (bp->hwrm_max_req_len) +#define BNXT_HWRM_SHORT_REQ_LEN sizeof(struct hwrm_short_input) +#define HWRM_CMD_MAX_TIMEOUT 40000 +#define SHORT_HWRM_CMD_TIMEOUT 20 +#define HWRM_CMD_TIMEOUT (bp->hwrm_cmd_timeout) +#define HWRM_RESET_TIMEOUT ((HWRM_CMD_TIMEOUT) * 4) +#define HWRM_COREDUMP_TIMEOUT ((HWRM_CMD_TIMEOUT) * 12) +#define BNXT_HWRM_TARGET 0xffff +#define BNXT_HWRM_NO_CMPL_RING -1 +#define BNXT_HWRM_REQ_MAX_SIZE 128 +#define BNXT_HWRM_DMA_SIZE (2 * PAGE_SIZE) /* space for req+resp */ +#define BNXT_HWRM_RESP_RESERVED PAGE_SIZE +#define BNXT_HWRM_RESP_OFFSET 
(BNXT_HWRM_DMA_SIZE - \ + BNXT_HWRM_RESP_RESERVED) +#define BNXT_HWRM_CTX_OFFSET (BNXT_HWRM_RESP_OFFSET - \ + sizeof(struct bnxt_hwrm_ctx)) +#define BNXT_HWRM_DMA_ALIGN 16 +#define BNXT_HWRM_SENTINEL 0xb6e1f68a12e9a7eb /* arbitrary value */ +#define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ + BNXT_HWRM_REQ_MAX_SIZE) +#define HWRM_SHORT_MIN_TIMEOUT 3 +#define HWRM_SHORT_MAX_TIMEOUT 10 +#define HWRM_SHORT_TIMEOUT_COUNTER 5 + +#define HWRM_MIN_TIMEOUT 25 +#define HWRM_MAX_TIMEOUT 40 + +#define HWRM_WAIT_MUST_ABORT(bp, ctx) \ + (le16_to_cpu((ctx)->req->req_type) != HWRM_VER_GET && \ + !bnxt_is_fw_healthy(bp)) + +static inline unsigned int hwrm_total_timeout(unsigned int n) +{ + return n <= HWRM_SHORT_TIMEOUT_COUNTER ? n * HWRM_SHORT_MIN_TIMEOUT : + HWRM_SHORT_TIMEOUT_COUNTER * HWRM_SHORT_MIN_TIMEOUT + + (n - HWRM_SHORT_TIMEOUT_COUNTER) * HWRM_MIN_TIMEOUT; +} + + +#define HWRM_VALID_BIT_DELAY_USEC 150 + +static inline bool bnxt_cfa_hwrm_message(u16 req_type) +{ + switch (req_type) { + case HWRM_CFA_ENCAP_RECORD_ALLOC: + case HWRM_CFA_ENCAP_RECORD_FREE: + case HWRM_CFA_DECAP_FILTER_ALLOC: + case HWRM_CFA_DECAP_FILTER_FREE: + case HWRM_CFA_EM_FLOW_ALLOC: + case HWRM_CFA_EM_FLOW_FREE: + case HWRM_CFA_EM_FLOW_CFG: + case HWRM_CFA_FLOW_ALLOC: + case HWRM_CFA_FLOW_FREE: + case HWRM_CFA_FLOW_INFO: + case HWRM_CFA_FLOW_FLUSH: + case HWRM_CFA_FLOW_STATS: + case HWRM_CFA_METER_PROFILE_ALLOC: + case HWRM_CFA_METER_PROFILE_FREE: + case HWRM_CFA_METER_PROFILE_CFG: + case HWRM_CFA_METER_INSTANCE_ALLOC: + case HWRM_CFA_METER_INSTANCE_FREE: + return true; + default: + return false; + } +} + +static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + (bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type)) || + le16_to_cpu(req->target_id) == HWRM_TARGET_ID_KONG)); +} + +int __hwrm_req_init(struct bnxt *bp, void **req, u16 req_type, u32 req_len); +#define hwrm_req_init(bp, req, req_type) \ + __hwrm_req_init((bp), (void **)&(req), (req_type), sizeof(*(req))) +void *hwrm_req_hold(struct bnxt *bp, void *req); +void hwrm_req_drop(struct bnxt *bp, void *req); +void hwrm_req_flags(struct bnxt *bp, void *req, enum bnxt_hwrm_ctx_flags flags); +void hwrm_req_timeout(struct bnxt *bp, void *req, unsigned int timeout); +int hwrm_req_send(struct bnxt *bp, void *req); +int hwrm_req_send_silent(struct bnxt *bp, void *req); +int hwrm_req_replace(struct bnxt *bp, void *req, void *new_req, u32 len); +void hwrm_req_alloc_flags(struct bnxt *bp, void *req, gfp_t flags); +void *hwrm_req_dma_slice(struct bnxt *bp, void *req, u32 size, dma_addr_t *dma); +#endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 81f40ab748f1..f0aa480799ca 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -18,6 +18,7 @@ #include <linux/ptp_classify.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ptp.h" int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off) @@ -56,16 +57,19 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, } /* Caller holds ptp_lock */ -static u64 bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts) +static int bnxt_refclk_read(struct bnxt *bp, struct ptp_system_timestamp *sts, + u64 *ns) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - u64 ns; + + if (test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) + return -EIO; ptp_read_system_prets(sts); - ns = readl(bp->bar0 + 
ptp->refclk_mapped_regs[0]); + *ns = readl(bp->bar0 + ptp->refclk_mapped_regs[0]); ptp_read_system_postts(sts); - ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32; - return ns; + *ns |= (u64)readl(bp->bar0 + ptp->refclk_mapped_regs[1]) << 32; + return 0; } static void bnxt_ptp_get_current_time(struct bnxt *bp) @@ -76,30 +80,34 @@ static void bnxt_ptp_get_current_time(struct bnxt *bp) return; spin_lock_bh(&ptp->ptp_lock); WRITE_ONCE(ptp->old_time, ptp->current_time); - ptp->current_time = bnxt_refclk_read(bp, NULL); + bnxt_refclk_read(bp, NULL, &ptp->current_time); spin_unlock_bh(&ptp->ptp_lock); } static int bnxt_hwrm_port_ts_query(struct bnxt *bp, u32 flags, u64 *ts) { - struct hwrm_port_ts_query_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_port_ts_query_input req = {0}; + struct hwrm_port_ts_query_output *resp; + struct hwrm_port_ts_query_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_TS_QUERY, -1, -1); - req.flags = cpu_to_le32(flags); + rc = hwrm_req_init(bp, req, HWRM_PORT_TS_QUERY); + if (rc) + return rc; + + req->flags = cpu_to_le32(flags); if ((flags & PORT_TS_QUERY_REQ_FLAGS_PATH) == PORT_TS_QUERY_REQ_FLAGS_PATH_TX) { - req.enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); - req.ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); - req.ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); - req.ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); + req->enables = cpu_to_le16(BNXT_PTP_QTS_TX_ENABLES); + req->ptp_seq_id = cpu_to_le32(bp->ptp_cfg->tx_seqid); + req->ptp_hdr_offset = cpu_to_le16(bp->ptp_cfg->tx_hdr_off); + req->ts_req_timeout = cpu_to_le16(BNXT_PTP_QTS_TIMEOUT); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + + rc = hwrm_req_send(bp, req); if (!rc) *ts = le64_to_cpu(resp->ptp_msg_ts); - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -110,9 +118,14 @@ static int bnxt_ptp_gettimex(struct ptp_clock_info *ptp_info, struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); u64 ns, cycles; + int rc; spin_lock_bh(&ptp->ptp_lock); - cycles = bnxt_refclk_read(ptp->bp, sts); + rc = bnxt_refclk_read(ptp->bp, sts, &cycles); + if (rc) { + spin_unlock_bh(&ptp->ptp_lock); + return rc; + } ns = timecounter_cyc2time(&ptp->tc, cycles); spin_unlock_bh(&ptp->ptp_lock); *ts = ns_to_timespec64(ns); @@ -135,33 +148,246 @@ static int bnxt_ptp_adjfreq(struct ptp_clock_info *ptp_info, s32 ppb) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - struct hwrm_port_mac_cfg_input req = {0}; + struct hwrm_port_mac_cfg_input *req; struct bnxt *bp = ptp->bp; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); - req.ptp_freq_adj_ppb = cpu_to_le32(ppb); - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(ptp->bp, req); if (rc) netdev_err(ptp->bp->dev, "ptp adjfreq failed. 
rc = %d\n", rc); return rc; } -static int bnxt_ptp_enable(struct ptp_clock_info *ptp, +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct ptp_clock_event event; + u64 ns, pps_ts; + + pps_ts = EVENT_PPS_TS(data2, data1); + spin_lock_bh(&ptp->ptp_lock); + ns = timecounter_cyc2time(&ptp->tc, pps_ts); + spin_unlock_bh(&ptp->ptp_lock); + + switch (EVENT_DATA2_PPS_EVENT_TYPE(data2)) { + case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_INTERNAL: + event.pps_times.ts_real = ns_to_timespec64(ns); + event.type = PTP_CLOCK_PPSUSR; + event.index = EVENT_DATA2_PPS_PIN_NUM(data2); + break; + case ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE_EXTERNAL: + event.timestamp = ns; + event.type = PTP_CLOCK_EXTTS; + event.index = EVENT_DATA2_PPS_PIN_NUM(data2); + break; + } + + ptp_clock_event(bp->ptp_cfg->ptp_clock, &event); +} + +static int bnxt_ptp_cfg_pin(struct bnxt *bp, u8 pin, u8 usage) +{ + struct hwrm_func_ptp_pin_cfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + u8 state = usage != BNXT_PPS_PIN_NONE; + u8 *pin_state, *pin_usg; + u32 enables; + int rc; + + if (!TSIO_PIN_VALID(pin)) { + netdev_err(ptp->bp->dev, "1PPS: Invalid pin. Check pin-function configuration\n"); + return -EOPNOTSUPP; + } + + rc = hwrm_req_init(ptp->bp, req, HWRM_FUNC_PTP_PIN_CFG); + if (rc) + return rc; + + enables = (FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_STATE | + FUNC_PTP_PIN_CFG_REQ_ENABLES_PIN0_USAGE) << (pin * 2); + req->enables = cpu_to_le32(enables); + + pin_state = &req->pin0_state; + pin_usg = &req->pin0_usage; + + *(pin_state + (pin * 2)) = state; + *(pin_usg + (pin * 2)) = usage; + + rc = hwrm_req_send(ptp->bp, req); + if (rc) + return rc; + + ptp->pps_info.pins[pin].usage = usage; + ptp->pps_info.pins[pin].state = state; + + return 0; +} + +static int bnxt_ptp_cfg_event(struct bnxt *bp, u8 event) +{ + struct hwrm_func_ptp_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + req->enables = cpu_to_le16(FUNC_PTP_CFG_REQ_ENABLES_PTP_PPS_EVENT); + req->ptp_pps_event = event; + return hwrm_req_send(bp, req); +} + +void bnxt_ptp_reapply_pps(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct bnxt_pps *pps; + u32 pin = 0; + int rc; + + if (!ptp || !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) || + !(ptp->ptp_info.pin_config)) + return; + pps = &ptp->pps_info; + for (pin = 0; pin < BNXT_MAX_TSIO_PINS; pin++) { + if (pps->pins[pin].state) { + rc = bnxt_ptp_cfg_pin(bp, pin, pps->pins[pin].usage); + if (!rc && pps->pins[pin].event) + rc = bnxt_ptp_cfg_event(bp, + pps->pins[pin].event); + if (rc) + netdev_err(bp->dev, "1PPS: Failed to configure pin%d\n", + pin); + } + } +} + +static int bnxt_get_target_cycles(struct bnxt_ptp_cfg *ptp, u64 target_ns, + u64 *cycles_delta) +{ + u64 cycles_now; + u64 nsec_now, nsec_delta; + int rc; + + spin_lock_bh(&ptp->ptp_lock); + rc = bnxt_refclk_read(ptp->bp, NULL, &cycles_now); + if (rc) { + spin_unlock_bh(&ptp->ptp_lock); + return rc; + } + nsec_now = timecounter_cyc2time(&ptp->tc, cycles_now); + spin_unlock_bh(&ptp->ptp_lock); + + nsec_delta = target_ns - nsec_now; + *cycles_delta = div64_u64(nsec_delta << ptp->cc.shift, ptp->cc.mult); + return 0; +} + +static int bnxt_ptp_perout_cfg(struct bnxt_ptp_cfg *ptp, + struct ptp_clock_request *rq) +{ + struct hwrm_func_ptp_cfg_input *req; + struct bnxt *bp = ptp->bp; + struct timespec64 ts; + u64 target_ns, delta; + u16 enables; + int rc; + + ts.tv_sec = rq->perout.start.sec; + ts.tv_nsec = 
rq->perout.start.nsec; + target_ns = timespec64_to_ns(&ts); + + rc = bnxt_get_target_cycles(ptp, target_ns, &delta); + if (rc) + return rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_CFG); + if (rc) + return rc; + + enables = FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PERIOD | + FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_UP | + FUNC_PTP_CFG_REQ_ENABLES_PTP_FREQ_ADJ_EXT_PHASE; + req->enables = cpu_to_le16(enables); + req->ptp_pps_event = 0; + req->ptp_freq_adj_dll_source = 0; + req->ptp_freq_adj_dll_phase = 0; + req->ptp_freq_adj_ext_period = cpu_to_le32(NSEC_PER_SEC); + req->ptp_freq_adj_ext_up = 0; + req->ptp_freq_adj_ext_phase_lower = cpu_to_le32(delta); + + return hwrm_req_send(bp, req); +} + +static int bnxt_ptp_enable(struct ptp_clock_info *ptp_info, struct ptp_clock_request *rq, int on) { - return -EOPNOTSUPP; + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + struct bnxt *bp = ptp->bp; + u8 pin_id; + int rc; + + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + /* Configure an External PPS IN */ + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_EXTTS, + rq->extts.index); + if (!on) + break; + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_IN); + if (rc) + return rc; + rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_EXTERNAL); + if (!rc) + ptp->pps_info.pins[pin_id].event = BNXT_PPS_EVENT_EXTERNAL; + return rc; + case PTP_CLK_REQ_PEROUT: + /* Configure a Periodic PPS OUT */ + pin_id = ptp_find_pin(ptp->ptp_clock, PTP_PF_PEROUT, + rq->perout.index); + if (!on) + break; + + rc = bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_PPS_OUT); + if (!rc) + rc = bnxt_ptp_perout_cfg(ptp, rq); + + return rc; + case PTP_CLK_REQ_PPS: + /* Configure PHC PPS IN */ + rc = bnxt_ptp_cfg_pin(bp, 0, BNXT_PPS_PIN_PPS_IN); + if (rc) + return rc; + rc = bnxt_ptp_cfg_event(bp, BNXT_PPS_EVENT_INTERNAL); + if (!rc) + ptp->pps_info.pins[0].event = BNXT_PPS_EVENT_INTERNAL; + return rc; + default: + netdev_err(ptp->bp->dev, "Unrecognized PIN function\n"); + return -EOPNOTSUPP; + } + + return bnxt_ptp_cfg_pin(bp, pin_id, BNXT_PPS_PIN_NONE); } static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) { - struct hwrm_port_mac_cfg_input req = {0}; struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct hwrm_port_mac_cfg_input *req; u32 flags = 0; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1); if (ptp->rx_filter) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_RX_TS_CAPTURE_ENABLE; else @@ -170,11 +396,11 @@ static int bnxt_hwrm_ptp_cfg(struct bnxt *bp) flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_ENABLE; else flags |= PORT_MAC_CFG_REQ_FLAGS_PTP_TX_TS_CAPTURE_DISABLE; - req.flags = cpu_to_le32(flags); - req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); - req.rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); + req->flags = cpu_to_le32(flags); + req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_RX_TS_CAPTURE_PTP_MSG_TYPE); + req->rx_ts_capture_ptp_msg_type = cpu_to_le16(ptp->rxctl); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) @@ -311,8 +537,10 @@ static void bnxt_unmap_ptp_regs(struct bnxt *bp) static u64 bnxt_cc_read(const struct cyclecounter *cc) { struct bnxt_ptp_cfg *ptp = container_of(cc, struct bnxt_ptp_cfg, cc); + u64 ns = 0; - return bnxt_refclk_read(ptp->bp, NULL); + bnxt_refclk_read(ptp->bp, NULL, &ns); + return ns; } static void bnxt_stamp_tx_skb(struct 
bnxt *bp, struct sk_buff *skb) @@ -410,6 +638,87 @@ static const struct ptp_clock_info bnxt_ptp_caps = { .enable = bnxt_ptp_enable, }; +static int bnxt_ptp_verify(struct ptp_clock_info *ptp_info, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, + ptp_info); + /* Allow only PPS pin function configuration */ + if (ptp->pps_info.pins[pin].usage <= BNXT_PPS_PIN_PPS_OUT && + func != PTP_PF_PHYSYNC) + return 0; + else + return -EOPNOTSUPP; +} + +static int bnxt_ptp_pps_init(struct bnxt *bp) +{ + struct hwrm_func_ptp_pin_qcfg_output *resp; + struct hwrm_func_ptp_pin_qcfg_input *req; + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + struct ptp_clock_info *ptp_info; + struct bnxt_pps *pps_info; + u8 *pin_usg; + u32 i, rc; + + /* Query current/default PIN CFG */ + rc = hwrm_req_init(bp, req, HWRM_FUNC_PTP_PIN_QCFG); + if (rc) + return rc; + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (rc || !resp->num_pins) { + hwrm_req_drop(bp, req); + return -EOPNOTSUPP; + } + + ptp_info = &ptp->ptp_info; + pps_info = &ptp->pps_info; + pps_info->num_pins = resp->num_pins; + ptp_info->n_pins = pps_info->num_pins; + ptp_info->pin_config = kcalloc(ptp_info->n_pins, + sizeof(*ptp_info->pin_config), + GFP_KERNEL); + if (!ptp_info->pin_config) { + hwrm_req_drop(bp, req); + return -ENOMEM; + } + + /* Report the TSIO capability to kernel */ + pin_usg = &resp->pin0_usage; + for (i = 0; i < pps_info->num_pins; i++, pin_usg++) { + snprintf(ptp_info->pin_config[i].name, + sizeof(ptp_info->pin_config[i].name), "bnxt_pps%d", i); + ptp_info->pin_config[i].index = i; + ptp_info->pin_config[i].chan = i; + if (*pin_usg == BNXT_PPS_PIN_PPS_IN) + ptp_info->pin_config[i].func = PTP_PF_EXTTS; + else if (*pin_usg == BNXT_PPS_PIN_PPS_OUT) + ptp_info->pin_config[i].func = PTP_PF_PEROUT; + else + ptp_info->pin_config[i].func = PTP_PF_NONE; + + pps_info->pins[i].usage = *pin_usg; + } + hwrm_req_drop(bp, req); + + /* Only 1 each of ext_ts and per_out pins is available in HW */ + ptp_info->n_ext_ts = 1; + ptp_info->n_per_out = 1; + ptp_info->pps = 1; + ptp_info->verify = bnxt_ptp_verify; + + return 0; +} + +static bool bnxt_pps_config_ok(struct bnxt *bp) +{ + struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; + + return !(bp->fw_cap & BNXT_FW_CAP_PTP_PPS) == !ptp->ptp_info.pin_config; +} + int bnxt_ptp_init(struct bnxt *bp) { struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; @@ -422,6 +731,15 @@ int bnxt_ptp_init(struct bnxt *bp) if (rc) return rc; + if (ptp->ptp_clock && bnxt_pps_config_ok(bp)) + return 0; + + if (ptp->ptp_clock) { + ptp_clock_unregister(ptp->ptp_clock); + ptp->ptp_clock = NULL; + kfree(ptp->ptp_info.pin_config); + ptp->ptp_info.pin_config = NULL; + } atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); @@ -435,6 +753,10 @@ int bnxt_ptp_init(struct bnxt *bp) timecounter_init(&ptp->tc, &ptp->cc, ktime_to_ns(ktime_get_real())); ptp->ptp_info = bnxt_ptp_caps; + if ((bp->fw_cap & BNXT_FW_CAP_PTP_PPS)) { + if (bnxt_ptp_pps_init(bp)) + netdev_err(bp->dev, "1pps not initialized, continuing without 1pps support\n"); + } ptp->ptp_clock = ptp_clock_register(&ptp->ptp_info, &bp->pdev->dev); if (IS_ERR(ptp->ptp_clock)) { int err = PTR_ERR(ptp->ptp_clock); @@ -445,7 +767,7 @@ int bnxt_ptp_init(struct bnxt *bp) } if (bp->flags & BNXT_FLAG_CHIP_P5) { spin_lock_bh(&ptp->ptp_lock); - ptp->current_time = bnxt_refclk_read(bp, NULL); + bnxt_refclk_read(bp, NULL, &ptp->current_time); WRITE_ONCE(ptp->old_time, ptp->current_time); 
spin_unlock_bh(&ptp->ptp_lock); ptp_schedule_worker(ptp->ptp_clock, 0); @@ -464,6 +786,9 @@ void bnxt_ptp_clear(struct bnxt *bp) ptp_clock_unregister(ptp->ptp_clock); ptp->ptp_clock = NULL; + kfree(ptp->ptp_info.pin_config); + ptp->ptp_info.pin_config = NULL; + if (ptp->tx_skb) { dev_kfree_skb_any(ptp->tx_skb); ptp->tx_skb = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 524f1c272054..fa5f05708e6d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -22,11 +22,62 @@ PORT_TS_QUERY_REQ_ENABLES_TS_REQ_TIMEOUT | \ PORT_TS_QUERY_REQ_ENABLES_PTP_HDR_OFFSET) +struct pps_pin { + u8 event; + u8 usage; + u8 state; +}; + +#define TSIO_PIN_VALID(pin) ((pin) < (BNXT_MAX_TSIO_PINS)) + +#define EVENT_DATA2_PPS_EVENT_TYPE(data2) \ + ((data2) & ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_EVENT_TYPE) + +#define EVENT_DATA2_PPS_PIN_NUM(data2) \ + (((data2) & \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_MASK) >>\ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PIN_NUMBER_SFT) + +#define BNXT_DATA2_UPPER_MSK \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_MASK + +#define BNXT_DATA2_UPPER_SFT \ + (32 - \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA2_PPS_TIMESTAMP_UPPER_SFT) + +#define BNXT_DATA1_LOWER_MSK \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_MASK + +#define BNXT_DATA1_LOWER_SFT \ + ASYNC_EVENT_CMPL_PPS_TIMESTAMP_EVENT_DATA1_PPS_TIMESTAMP_LOWER_SFT + +#define EVENT_PPS_TS(data2, data1) \ + (((u64)((data2) & BNXT_DATA2_UPPER_MSK) << BNXT_DATA2_UPPER_SFT) |\ + (((data1) & BNXT_DATA1_LOWER_MSK) >> BNXT_DATA1_LOWER_SFT)) + +#define BNXT_PPS_PIN_DISABLE 0 +#define BNXT_PPS_PIN_ENABLE 1 +#define BNXT_PPS_PIN_NONE 0 +#define BNXT_PPS_PIN_PPS_IN 1 +#define BNXT_PPS_PIN_PPS_OUT 2 +#define BNXT_PPS_PIN_SYNC_IN 3 +#define BNXT_PPS_PIN_SYNC_OUT 4 + +#define BNXT_PPS_EVENT_INTERNAL 1 +#define BNXT_PPS_EVENT_EXTERNAL 2 + +struct bnxt_pps { + u8 num_pins; +#define BNXT_MAX_TSIO_PINS 4 + struct pps_pin pins[BNXT_MAX_TSIO_PINS]; +}; + struct bnxt_ptp_cfg { struct ptp_clock_info ptp_info; struct ptp_clock *ptp_clock; struct cyclecounter cc; struct timecounter tc; + struct bnxt_pps pps_info; /* serialize timecounter access */ spinlock_t ptp_lock; struct sk_buff *tx_skb; @@ -77,6 +128,8 @@ do { \ #endif int bnxt_ptp_parse(struct sk_buff *skb, u16 *seq_id, u16 *hdr_off); +void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2); +void bnxt_ptp_reapply_pps(struct bnxt *bp); int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 7fa881e1cd80..70d8ca3039dc 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -17,6 +17,7 @@ #include <linux/etherdevice.h> #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" #include "bnxt_sriov.h" #include "bnxt_vfr.h" @@ -26,21 +27,26 @@ static int bnxt_hwrm_fwd_async_event_cmpl(struct bnxt *bp, struct bnxt_vf_info *vf, u16 event_id) { - struct hwrm_fwd_async_event_cmpl_input req = {0}; + struct hwrm_fwd_async_event_cmpl_input *req; struct hwrm_async_event_cmpl *async_cmpl; int rc = 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_ASYNC_EVENT_CMPL, -1, -1); + rc = 
hwrm_req_init(bp, req, HWRM_FWD_ASYNC_EVENT_CMPL); + if (rc) + goto exit; + if (vf) - req.encap_async_event_target_id = cpu_to_le16(vf->fw_fid); + req->encap_async_event_target_id = cpu_to_le16(vf->fw_fid); else /* broadcast this async event to all VFs */ - req.encap_async_event_target_id = cpu_to_le16(0xffff); - async_cmpl = (struct hwrm_async_event_cmpl *)req.encap_async_event_cmpl; + req->encap_async_event_target_id = cpu_to_le16(0xffff); + async_cmpl = + (struct hwrm_async_event_cmpl *)req->encap_async_event_cmpl; async_cmpl->type = cpu_to_le16(ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT); async_cmpl->event_id = cpu_to_le16(event_id); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); +exit: if (rc) netdev_err(bp->dev, "hwrm_fwd_async_event_cmpl failed. rc:%d\n", rc); @@ -62,10 +68,10 @@ static int bnxt_vf_ndo_prep(struct bnxt *bp, int vf_id) int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); - struct bnxt_vf_info *vf; + struct hwrm_func_cfg_input *req; bool old_setting = false; + struct bnxt_vf_info *vf; u32 func_flags; int rc; @@ -89,36 +95,38 @@ int bnxt_set_vf_spoofchk(struct net_device *dev, int vf_id, bool setting) /*TODO: if the driver supports VLAN filter on guest VLAN, * the spoof check should also include vlan anti-spoofing */ - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.flags = cpu_to_le32(func_flags); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - if (setting) - vf->flags |= BNXT_VF_SPOOFCHK; - else - vf->flags &= ~BNXT_VF_SPOOFCHK; + req->fid = cpu_to_le16(vf->fw_fid); + req->flags = cpu_to_le32(func_flags); + rc = hwrm_req_send(bp, req); + if (!rc) { + if (setting) + vf->flags |= BNXT_VF_SPOOFCHK; + else + vf->flags &= ~BNXT_VF_SPOOFCHK; + } } return rc; } static int bnxt_hwrm_func_qcfg_flags(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(BNXT_PF(bp) ? vf->fw_fid : 0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (rc) { - mutex_unlock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) return rc; - } - vf->func_qcfg_flags = le16_to_cpu(resp->flags); - mutex_unlock(&bp->hwrm_cmd_lock); - return 0; + + req->fid = cpu_to_le16(BNXT_PF(bp) ? 
vf->fw_fid : 0xffff); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->func_qcfg_flags = le16_to_cpu(resp->flags); + hwrm_req_drop(bp, req); + return rc; } bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) @@ -132,18 +140,22 @@ bool bnxt_is_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) static int bnxt_hwrm_set_trusted_vf(struct bnxt *bp, struct bnxt_vf_info *vf) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; + int rc; if (!(bp->fw_cap & BNXT_FW_CAP_TRUSTED_VF)) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + + req->fid = cpu_to_le16(vf->fw_fid); if (vf->flags & BNXT_VF_TRUST) - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); else - req.flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->flags = cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_DISABLE); + return hwrm_req_send(bp, req); } int bnxt_set_vf_trust(struct net_device *dev, int vf_id, bool trusted) @@ -203,8 +215,8 @@ int bnxt_get_vf_config(struct net_device *dev, int vf_id, int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; int rc; @@ -220,19 +232,23 @@ int bnxt_set_vf_mac(struct net_device *dev, int vf_id, u8 *mac) } vf = &bp->pf.vf[vf_id]; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; + memcpy(vf->mac_addr, mac, ETH_ALEN); - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + return hwrm_req_send(bp, req); } int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, __be16 vlan_proto) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u16 vlan_tag; int rc; @@ -258,21 +274,23 @@ int bnxt_set_vf_vlan(struct net_device *dev, int vf_id, u16 vlan_id, u8 qos, if (vlan_tag == vf->vlan) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.dflt_vlan = cpu_to_le16(vlan_tag); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) - vf->vlan = vlan_tag; + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (!rc) { + req->fid = cpu_to_le16(vf->fw_fid); + req->dflt_vlan = cpu_to_le16(vlan_tag); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + rc = hwrm_req_send(bp, req); + if (!rc) + vf->vlan = vlan_tag; + } return rc; } int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, int max_tx_rate) { - struct hwrm_func_cfg_input req = {0}; struct bnxt *bp = netdev_priv(dev); + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; u32 pf_link_speed; int rc; @@ -296,16 +314,18 @@ int bnxt_set_vf_bw(struct net_device *dev, int vf_id, int min_tx_rate, } if (min_tx_rate == 
vf->min_tx_rate && max_tx_rate == vf->max_tx_rate) return 0; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(max_tx_rate); - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(min_tx_rate); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); if (!rc) { - vf->min_tx_rate = min_tx_rate; - vf->max_tx_rate = max_tx_rate; + req->fid = cpu_to_le16(vf->fw_fid); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | + FUNC_CFG_REQ_ENABLES_MIN_BW); + req->max_bw = cpu_to_le32(max_tx_rate); + req->min_bw = cpu_to_le32(min_tx_rate); + rc = hwrm_req_send(bp, req); + if (!rc) { + vf->min_tx_rate = min_tx_rate; + vf->max_tx_rate = max_tx_rate; + } } return rc; } @@ -358,21 +378,22 @@ static int bnxt_set_vf_attr(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_vf_resource_free(struct bnxt *bp, int num_vfs) { - int i, rc = 0; + struct hwrm_func_vf_resc_free_input *req; struct bnxt_pf_info *pf = &bp->pf; - struct hwrm_func_vf_resc_free_input req = {0}; + int i, rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESC_FREE, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESC_FREE); + if (rc) + return rc; - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = pf->first_vf_id; i < pf->first_vf_id + num_vfs; i++) { - req.vf_id = cpu_to_le16(i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(i); + rc = hwrm_req_send(bp, req); if (rc) break; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -446,51 +467,55 @@ static int bnxt_alloc_vf_resources(struct bnxt *bp, int num_vfs) static int bnxt_hwrm_func_buf_rgtr(struct bnxt *bp) { - struct hwrm_func_buf_rgtr_input req = {0}; + struct hwrm_func_buf_rgtr_input *req; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_BUF_RGTR, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_BUF_RGTR); + if (rc) + return rc; - req.req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); - req.req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); - req.req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); - req.req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); - req.req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); - req.req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); - req.req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); + req->req_buf_num_pages = cpu_to_le16(bp->pf.hwrm_cmd_req_pages); + req->req_buf_page_size = cpu_to_le16(BNXT_PAGE_SHIFT); + req->req_buf_len = cpu_to_le16(BNXT_HWRM_REQ_MAX_SIZE); + req->req_buf_page_addr0 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[0]); + req->req_buf_page_addr1 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[1]); + req->req_buf_page_addr2 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[2]); + req->req_buf_page_addr3 = cpu_to_le64(bp->pf.hwrm_cmd_req_dma_addr[3]); - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } -/* Caller holds bp->hwrm_cmd_lock mutex lock */ -static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) +static int __bnxt_set_vf_params(struct bnxt *bp, int vf_id) { - struct hwrm_func_cfg_input req = {0}; + struct hwrm_func_cfg_input *req; struct bnxt_vf_info *vf; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; vf = &bp->pf.vf[vf_id]; - 
bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - req.fid = cpu_to_le16(vf->fw_fid); + req->fid = cpu_to_le16(vf->fw_fid); if (is_valid_ether_addr(vf->mac_addr)) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, vf->mac_addr, ETH_ALEN); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, vf->mac_addr, ETH_ALEN); } if (vf->vlan) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); - req.dflt_vlan = cpu_to_le16(vf->vlan); + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_VLAN); + req->dflt_vlan = cpu_to_le16(vf->vlan); } if (vf->max_tx_rate) { - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW); - req.max_bw = cpu_to_le32(vf->max_tx_rate); -#ifdef HAVE_IFLA_TX_RATE - req.enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MIN_BW); - req.min_bw = cpu_to_le32(vf->min_tx_rate); -#endif + req->enables |= cpu_to_le32(FUNC_CFG_REQ_ENABLES_MAX_BW | + FUNC_CFG_REQ_ENABLES_MIN_BW); + req->max_bw = cpu_to_le32(vf->max_tx_rate); + req->min_bw = cpu_to_le32(vf->min_tx_rate); } if (vf->flags & BNXT_VF_TRUST) - req.flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); + req->flags |= cpu_to_le32(FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE); - _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + return hwrm_req_send(bp, req); } /* Only called by PF to reserve resources for VFs, returns actual number of @@ -498,7 +523,7 @@ static void __bnxt_set_vf_params(struct bnxt *bp, int vf_id) */ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) { - struct hwrm_func_vf_resource_cfg_input req = {0}; + struct hwrm_func_vf_resource_cfg_input *req; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings; u16 vf_stat_ctx, vf_vnics, vf_ring_grps; @@ -507,7 +532,9 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) u16 vf_msix = 0; u16 vf_rss; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_RESOURCE_CFG); + if (rc) + return rc; if (bp->flags & BNXT_FLAG_CHIP_P5) { vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); @@ -526,21 +553,21 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); vf_rss = hw_resc->max_rsscos_ctxs - bp->rsscos_nr_ctxs; - req.min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); + req->min_rsscos_ctx = cpu_to_le16(BNXT_VF_MIN_RSS_CTX); if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { min = 0; - req.min_rsscos_ctx = cpu_to_le16(min); + req->min_rsscos_ctx = cpu_to_le16(min); } if (pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL || pf->vf_resv_strategy == BNXT_VF_RESV_STRATEGY_MINIMAL_STATIC) { - req.min_cmpl_rings = cpu_to_le16(min); - req.min_tx_rings = cpu_to_le16(min); - req.min_rx_rings = cpu_to_le16(min); - req.min_l2_ctxs = cpu_to_le16(min); - req.min_vnics = cpu_to_le16(min); - req.min_stat_ctx = cpu_to_le16(min); + req->min_cmpl_rings = cpu_to_le16(min); + req->min_tx_rings = cpu_to_le16(min); + req->min_rx_rings = cpu_to_le16(min); + req->min_l2_ctxs = cpu_to_le16(min); + req->min_vnics = cpu_to_le16(min); + req->min_stat_ctx = cpu_to_le16(min); if (!(bp->flags & BNXT_FLAG_CHIP_P5)) - req.min_hw_ring_grps = cpu_to_le16(min); + req->min_hw_ring_grps = cpu_to_le16(min); } else { vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; @@ -550,56 +577,57 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool 
reset) vf_ring_grps /= num_vfs; vf_rss /= num_vfs; - req.min_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.min_tx_rings = cpu_to_le16(vf_tx_rings); - req.min_rx_rings = cpu_to_le16(vf_rx_rings); - req.min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.min_vnics = cpu_to_le16(vf_vnics); - req.min_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.min_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.min_rsscos_ctx = cpu_to_le16(vf_rss); + req->min_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->min_tx_rings = cpu_to_le16(vf_tx_rings); + req->min_rx_rings = cpu_to_le16(vf_rx_rings); + req->min_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->min_vnics = cpu_to_le16(vf_vnics); + req->min_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->min_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->min_rsscos_ctx = cpu_to_le16(vf_rss); } - req.max_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.max_tx_rings = cpu_to_le16(vf_tx_rings); - req.max_rx_rings = cpu_to_le16(vf_rx_rings); - req.max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); - req.max_vnics = cpu_to_le16(vf_vnics); - req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); - req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.max_rsscos_ctx = cpu_to_le16(vf_rss); + req->max_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->max_tx_rings = cpu_to_le16(vf_tx_rings); + req->max_rx_rings = cpu_to_le16(vf_rx_rings); + req->max_l2_ctxs = cpu_to_le16(BNXT_VF_MAX_L2_CTX); + req->max_vnics = cpu_to_le16(vf_vnics); + req->max_stat_ctx = cpu_to_le16(vf_stat_ctx); + req->max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->max_rsscos_ctx = cpu_to_le16(vf_rss); if (bp->flags & BNXT_FLAG_CHIP_P5) - req.max_msix = cpu_to_le16(vf_msix / num_vfs); + req->max_msix = cpu_to_le16(vf_msix / num_vfs); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { if (reset) __bnxt_set_vf_params(bp, i); - req.vf_id = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->vf_id = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; pf->vf[i].fw_fid = pf->first_vf_id + i; } - mutex_unlock(&bp->hwrm_cmd_lock); + if (pf->active_vfs) { u16 n = pf->active_vfs; - hw_resc->max_tx_rings -= le16_to_cpu(req.min_tx_rings) * n; - hw_resc->max_rx_rings -= le16_to_cpu(req.min_rx_rings) * n; - hw_resc->max_hw_ring_grps -= le16_to_cpu(req.min_hw_ring_grps) * - n; - hw_resc->max_cp_rings -= le16_to_cpu(req.min_cmpl_rings) * n; - hw_resc->max_rsscos_ctxs -= le16_to_cpu(req.min_rsscos_ctx) * n; - hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; - hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; + hw_resc->max_tx_rings -= le16_to_cpu(req->min_tx_rings) * n; + hw_resc->max_rx_rings -= le16_to_cpu(req->min_rx_rings) * n; + hw_resc->max_hw_ring_grps -= + le16_to_cpu(req->min_hw_ring_grps) * n; + hw_resc->max_cp_rings -= le16_to_cpu(req->min_cmpl_rings) * n; + hw_resc->max_rsscos_ctxs -= + le16_to_cpu(req->min_rsscos_ctx) * n; + hw_resc->max_stat_ctxs -= le16_to_cpu(req->min_stat_ctx) * n; + hw_resc->max_vnics -= le16_to_cpu(req->min_vnics) * n; if (bp->flags & BNXT_FLAG_CHIP_P5) hw_resc->max_irqs -= vf_msix * n; rc = pf->active_vfs; } + hwrm_req_drop(bp, req); return rc; } @@ -608,15 +636,18 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs, bool reset) */ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) { - u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - 
struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; + struct hwrm_func_cfg_input *req; int total_vf_tx_rings = 0; u16 vf_ring_grps; + u32 mtu, i; + int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_FUNC_CFG); + if (rc) + return rc; /* Remaining rings are distributed equally amongs VF's for now */ vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; @@ -632,50 +663,49 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) vf_vnics = (hw_resc->max_vnics - bp->nr_vnics) / num_vfs; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); - req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU | - FUNC_CFG_REQ_ENABLES_MRU | - FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | - FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | - FUNC_CFG_REQ_ENABLES_NUM_VNICS | - FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); + req->enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_ADMIN_MTU | + FUNC_CFG_REQ_ENABLES_MRU | + FUNC_CFG_REQ_ENABLES_NUM_RSSCOS_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_STAT_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_CMPL_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_TX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_RX_RINGS | + FUNC_CFG_REQ_ENABLES_NUM_L2_CTXS | + FUNC_CFG_REQ_ENABLES_NUM_VNICS | + FUNC_CFG_REQ_ENABLES_NUM_HW_RING_GRPS); mtu = bp->dev->mtu + ETH_HLEN + VLAN_HLEN; - req.mru = cpu_to_le16(mtu); - req.admin_mtu = cpu_to_le16(mtu); + req->mru = cpu_to_le16(mtu); + req->admin_mtu = cpu_to_le16(mtu); - req.num_rsscos_ctxs = cpu_to_le16(1); - req.num_cmpl_rings = cpu_to_le16(vf_cp_rings); - req.num_tx_rings = cpu_to_le16(vf_tx_rings); - req.num_rx_rings = cpu_to_le16(vf_rx_rings); - req.num_hw_ring_grps = cpu_to_le16(vf_ring_grps); - req.num_l2_ctxs = cpu_to_le16(4); + req->num_rsscos_ctxs = cpu_to_le16(1); + req->num_cmpl_rings = cpu_to_le16(vf_cp_rings); + req->num_tx_rings = cpu_to_le16(vf_tx_rings); + req->num_rx_rings = cpu_to_le16(vf_rx_rings); + req->num_hw_ring_grps = cpu_to_le16(vf_ring_grps); + req->num_l2_ctxs = cpu_to_le16(4); - req.num_vnics = cpu_to_le16(vf_vnics); + req->num_vnics = cpu_to_le16(vf_vnics); /* FIXME spec currently uses 1 bit for stats ctx */ - req.num_stat_ctxs = cpu_to_le16(vf_stat_ctx); + req->num_stat_ctxs = cpu_to_le16(vf_stat_ctx); - mutex_lock(&bp->hwrm_cmd_lock); + hwrm_req_hold(bp, req); for (i = 0; i < num_vfs; i++) { int vf_tx_rsvd = vf_tx_rings; - req.fid = cpu_to_le16(pf->first_vf_id + i); - rc = _hwrm_send_message(bp, &req, sizeof(req), - HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(pf->first_vf_id + i); + rc = hwrm_req_send(bp, req); if (rc) break; pf->active_vfs = i + 1; - pf->vf[i].fw_fid = le16_to_cpu(req.fid); + pf->vf[i].fw_fid = le16_to_cpu(req->fid); rc = __bnxt_hwrm_get_tx_rings(bp, pf->vf[i].fw_fid, &vf_tx_rsvd); if (rc) break; total_vf_tx_rings += vf_tx_rsvd; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (pf->active_vfs) { hw_resc->max_tx_rings -= total_vf_tx_rings; hw_resc->max_rx_rings -= vf_rx_rings * num_vfs; @@ -893,23 +923,24 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, void *encap_resp, __le64 encap_resp_addr, __le16 encap_resp_cpr, u32 msg_size) { - int rc = 0; - struct hwrm_fwd_resp_input req = {0}; + struct hwrm_fwd_resp_input *req; + int rc; if (BNXT_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FWD_RESP, -1, -1); - - /* Set the new target id */ - req.target_id = 
cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_len = cpu_to_le16(msg_size); - req.encap_resp_addr = encap_resp_addr; - req.encap_resp_cmpl_ring = encap_resp_cpr; - memcpy(req.encap_resp, encap_resp, msg_size); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_len = cpu_to_le16(msg_size); + req->encap_resp_addr = encap_resp_addr; + req->encap_resp_cmpl_ring = encap_resp_cpr; + memcpy(req->encap_resp, encap_resp, msg_size); + + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_resp failed. rc:%d\n", rc); return rc; @@ -918,19 +949,21 @@ static int bnxt_hwrm_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_reject_fwd_resp_input req = {0}; + struct hwrm_reject_fwd_resp_input *req; + int rc; if (BNXT_REJ_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_REJECT_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_REJECT_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_fwd_err_resp failed. rc:%d\n", rc); return rc; @@ -939,19 +972,21 @@ static int bnxt_hwrm_fwd_err_resp(struct bnxt *bp, struct bnxt_vf_info *vf, static int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, struct bnxt_vf_info *vf, u32 msg_size) { - int rc = 0; - struct hwrm_exec_fwd_resp_input req = {0}; + struct hwrm_exec_fwd_resp_input *req; + int rc; if (BNXT_EXEC_FWD_RESP_SIZE_ERR(msg_size)) return -EINVAL; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_EXEC_FWD_RESP, -1, -1); - /* Set the new target id */ - req.target_id = cpu_to_le16(vf->fw_fid); - req.encap_resp_target_id = cpu_to_le16(vf->fw_fid); - memcpy(req.encap_request, vf->hwrm_cmd_req_addr, msg_size); + rc = hwrm_req_init(bp, req, HWRM_EXEC_FWD_RESP); + if (!rc) { + /* Set the new target id */ + req->target_id = cpu_to_le16(vf->fw_fid); + req->encap_resp_target_id = cpu_to_le16(vf->fw_fid); + memcpy(req->encap_request, vf->hwrm_cmd_req_addr, msg_size); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_err(bp->dev, "hwrm_exec_fw_resp failed. 
rc:%d\n", rc); return rc; @@ -1031,10 +1066,10 @@ static int bnxt_vf_set_link(struct bnxt *bp, struct bnxt_vf_info *vf) phy_qcfg_req = (struct hwrm_port_phy_qcfg_input *)vf->hwrm_cmd_req_addr; - mutex_lock(&bp->hwrm_cmd_lock); + mutex_lock(&bp->link_lock); memcpy(&phy_qcfg_resp, &bp->link_info.phy_qcfg_resp, sizeof(phy_qcfg_resp)); - mutex_unlock(&bp->hwrm_cmd_lock); + mutex_unlock(&bp->link_lock); phy_qcfg_resp.resp_len = cpu_to_le16(sizeof(phy_qcfg_resp)); phy_qcfg_resp.seq_id = phy_qcfg_req->seq_id; phy_qcfg_resp.valid = 1; @@ -1118,7 +1153,7 @@ void bnxt_hwrm_exec_fwd_req(struct bnxt *bp) int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) { - struct hwrm_func_vf_cfg_input req = {0}; + struct hwrm_func_vf_cfg_input *req; int rc = 0; if (!BNXT_VF(bp)) @@ -1129,10 +1164,16 @@ int bnxt_approve_mac(struct bnxt *bp, u8 *mac, bool strict) rc = -EADDRNOTAVAIL; goto mac_done; } - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_CFG, -1, -1); - req.enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); - memcpy(req.dflt_mac_addr, mac, ETH_ALEN); - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + + rc = hwrm_req_init(bp, req, HWRM_FUNC_VF_CFG); + if (rc) + goto mac_done; + + req->enables = cpu_to_le32(FUNC_VF_CFG_REQ_ENABLES_DFLT_MAC_ADDR); + memcpy(req->dflt_mac_addr, mac, ETH_ALEN); + if (!strict) + hwrm_req_flags(bp, req, BNXT_HWRM_CTX_SILENT); + rc = hwrm_req_send(bp, req); mac_done: if (rc && strict) { rc = -EADDRNOTAVAIL; @@ -1145,15 +1186,17 @@ mac_done: void bnxt_update_vf_mac(struct bnxt *bp) { - struct hwrm_func_qcaps_input req = {0}; - struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_func_qcaps_output *resp; + struct hwrm_func_qcaps_input *req; bool inform_pf = false; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCAPS, -1, -1); - req.fid = cpu_to_le16(0xffff); + if (hwrm_req_init(bp, req, HWRM_FUNC_QCAPS)) + return; + + req->fid = cpu_to_le16(0xffff); - mutex_lock(&bp->hwrm_cmd_lock); - if (_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + resp = hwrm_req_hold(bp, req); + if (hwrm_req_send(bp, req)) goto update_vf_mac_exit; /* Store MAC address from the firmware. 
There are 2 cases: @@ -1176,7 +1219,7 @@ void bnxt_update_vf_mac(struct bnxt *bp) if (is_valid_ether_addr(bp->vf.mac_addr)) memcpy(bp->dev->dev_addr, bp->vf.mac_addr, ETH_ALEN); update_vf_mac_exit: - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); if (inform_pf) bnxt_approve_mac(bp, bp->dev->dev_addr, false); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 5e4429b14b8c..46fae1acbeed 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -22,6 +22,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_sriov.h" #include "bnxt_tc.h" #include "bnxt_vfr.h" @@ -502,16 +503,18 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, struct bnxt_tc_flow_node *flow_node) { - struct hwrm_cfa_flow_free_input req = { 0 }; + struct hwrm_cfa_flow_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); - if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) - req.ext_flow_handle = flow_node->ext_flow_handle; - else - req.flow_handle = flow_node->flow_handle; + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_FREE); + if (!rc) { + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req->ext_flow_handle = flow_node->ext_flow_handle; + else + req->flow_handle = flow_node->flow_handle; - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -587,20 +590,22 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, struct bnxt_tc_actions *actions = &flow->actions; struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; struct bnxt_tc_l3_key *l3_key = &flow->l3_key; - struct hwrm_cfa_flow_alloc_input req = { 0 }; struct hwrm_cfa_flow_alloc_output *resp; + struct hwrm_cfa_flow_alloc_input *req; u16 flow_flags = 0, action_flags = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_ALLOC); + if (rc) + return rc; - req.src_fid = cpu_to_le16(flow->src_fid); - req.ref_flow_handle = ref_flow_handle; + req->src_fid = cpu_to_le16(flow->src_fid); + req->ref_flow_handle = ref_flow_handle; if (actions->flags & BNXT_TC_ACTION_FLAG_L2_REWRITE) { - memcpy(req.l2_rewrite_dmac, actions->l2_rewrite_dmac, + memcpy(req->l2_rewrite_dmac, actions->l2_rewrite_dmac, ETH_ALEN); - memcpy(req.l2_rewrite_smac, actions->l2_rewrite_smac, + memcpy(req->l2_rewrite_smac, actions->l2_rewrite_smac, ETH_ALEN); action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; @@ -615,71 +620,71 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; /* L3 source rewrite */ - req.nat_ip_address[0] = + req->nat_ip_address[0] = actions->nat.l3.ipv4.saddr.s_addr; /* L4 source port */ if (actions->nat.l4.ports.sport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.sport; } else { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; /* L3 destination rewrite */ - req.nat_ip_address[0] = + req->nat_ip_address[0] = actions->nat.l3.ipv4.daddr.s_addr; /* L4 destination port */ if (actions->nat.l4.ports.dport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.dport; } netdev_dbg(bp->dev, - "req.nat_ip_address: %pI4 src_xlate: %d req.nat_port: %x\n", - req.nat_ip_address, actions->nat.src_xlate, - req.nat_port); + "req->nat_ip_address: 
%pI4 src_xlate: %d req->nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); } else { if (actions->nat.src_xlate) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC; /* L3 source rewrite */ - memcpy(req.nat_ip_address, + memcpy(req->nat_ip_address, actions->nat.l3.ipv6.saddr.s6_addr32, - sizeof(req.nat_ip_address)); + sizeof(req->nat_ip_address)); /* L4 source port */ if (actions->nat.l4.ports.sport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.sport; } else { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST; /* L3 destination rewrite */ - memcpy(req.nat_ip_address, + memcpy(req->nat_ip_address, actions->nat.l3.ipv6.daddr.s6_addr32, - sizeof(req.nat_ip_address)); + sizeof(req->nat_ip_address)); /* L4 destination port */ if (actions->nat.l4.ports.dport) - req.nat_port = + req->nat_port = actions->nat.l4.ports.dport; } netdev_dbg(bp->dev, - "req.nat_ip_address: %pI6 src_xlate: %d req.nat_port: %x\n", - req.nat_ip_address, actions->nat.src_xlate, - req.nat_port); + "req->nat_ip_address: %pI6 src_xlate: %d req->nat_port: %x\n", + req->nat_ip_address, actions->nat.src_xlate, + req->nat_port); } } if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP || actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) { - req.tunnel_handle = tunnel_handle; + req->tunnel_handle = tunnel_handle; flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL; action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL; } - req.ethertype = flow->l2_key.ether_type; - req.ip_proto = flow->l4_key.ip_proto; + req->ethertype = flow->l2_key.ether_type; + req->ip_proto = flow->l4_key.ip_proto; if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) { - memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN); - memcpy(req.smac, flow->l2_key.smac, ETH_ALEN); + memcpy(req->dmac, flow->l2_key.dmac, ETH_ALEN); + memcpy(req->smac, flow->l2_key.smac, ETH_ALEN); } if (flow->l2_key.num_vlans > 0) { @@ -688,7 +693,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, * in outer_vlan_tci when num_vlans is 1 (which is * always the case in TC.) 
*/ - req.outer_vlan_tci = flow->l2_key.inner_vlan_tci; + req->outer_vlan_tci = flow->l2_key.inner_vlan_tci; } /* If all IP and L4 fields are wildcarded then this is an L2 flow */ @@ -701,68 +706,67 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6; if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) { - req.ip_dst[0] = l3_key->ipv4.daddr.s_addr; - req.ip_dst_mask_len = + req->ip_dst[0] = l3_key->ipv4.daddr.s_addr; + req->ip_dst_mask_len = inet_mask_len(l3_mask->ipv4.daddr.s_addr); - req.ip_src[0] = l3_key->ipv4.saddr.s_addr; - req.ip_src_mask_len = + req->ip_src[0] = l3_key->ipv4.saddr.s_addr; + req->ip_src_mask_len = inet_mask_len(l3_mask->ipv4.saddr.s_addr); } else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) { - memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32, - sizeof(req.ip_dst)); - req.ip_dst_mask_len = + memcpy(req->ip_dst, l3_key->ipv6.daddr.s6_addr32, + sizeof(req->ip_dst)); + req->ip_dst_mask_len = ipv6_mask_len(&l3_mask->ipv6.daddr); - memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32, - sizeof(req.ip_src)); - req.ip_src_mask_len = + memcpy(req->ip_src, l3_key->ipv6.saddr.s6_addr32, + sizeof(req->ip_src)); + req->ip_src_mask_len = ipv6_mask_len(&l3_mask->ipv6.saddr); } } if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) { - req.l4_src_port = flow->l4_key.ports.sport; - req.l4_src_port_mask = flow->l4_mask.ports.sport; - req.l4_dst_port = flow->l4_key.ports.dport; - req.l4_dst_port_mask = flow->l4_mask.ports.dport; + req->l4_src_port = flow->l4_key.ports.sport; + req->l4_src_port_mask = flow->l4_mask.ports.sport; + req->l4_dst_port = flow->l4_key.ports.dport; + req->l4_dst_port_mask = flow->l4_mask.ports.dport; } else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) { /* l4 ports serve as type/code when ip_proto is ICMP */ - req.l4_src_port = htons(flow->l4_key.icmp.type); - req.l4_src_port_mask = htons(flow->l4_mask.icmp.type); - req.l4_dst_port = htons(flow->l4_key.icmp.code); - req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code); + req->l4_src_port = htons(flow->l4_key.icmp.type); + req->l4_src_port_mask = htons(flow->l4_mask.icmp.type); + req->l4_dst_port = htons(flow->l4_key.icmp.code); + req->l4_dst_port_mask = htons(flow->l4_mask.icmp.code); } - req.flags = cpu_to_le16(flow_flags); + req->flags = cpu_to_le16(flow_flags); if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP; } else { if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD; - req.dst_fid = cpu_to_le16(actions->dst_fid); + req->dst_fid = cpu_to_le16(actions->dst_fid); } if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; - req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid; - req.l2_rewrite_vlan_tci = actions->push_vlan_tci; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = actions->push_vlan_tpid; + req->l2_rewrite_vlan_tci = actions->push_vlan_tci; + memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) { action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE; /* Rewrite config with tpid = 0 implies vlan pop */ - req.l2_rewrite_vlan_tpid = 0; - memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN); - memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN); + req->l2_rewrite_vlan_tpid = 0; + 
memcpy(&req->l2_rewrite_dmac, &req->dmac, ETH_ALEN); + memcpy(&req->l2_rewrite_smac, &req->smac, ETH_ALEN); } } - req.action_flags = cpu_to_le16(action_flags); + req->action_flags = cpu_to_le16(action_flags); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); /* CFA_FLOW_ALLOC response interpretation: * fw with fw with * 16-bit 64-bit @@ -778,7 +782,7 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, flow_node->flow_id = resp->flow_id; } } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } @@ -788,67 +792,69 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, __le32 ref_decap_handle, __le32 *decap_filter_handle) { - struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; struct hwrm_cfa_decap_filter_alloc_output *resp; struct ip_tunnel_key *tun_key = &flow->tun_key; + struct hwrm_cfa_decap_filter_alloc_input *req; u32 enables = 0; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_ALLOC); + if (rc) + goto exit; - req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); + req->flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL); enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL; - req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; - req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; + req->tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN; + req->ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP; if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID; /* tunnel_id is wrongly defined in hsi defn. as __le32 */ - req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id); + req->tunnel_id = tunnel_id_to_key32(tun_key->tun_id); } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR; - ether_addr_copy(req.dst_macaddr, l2_info->dmac); + ether_addr_copy(req->dst_macaddr, l2_info->dmac); } if (l2_info->num_vlans) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID; - req.t_ivlan_vid = l2_info->inner_vlan_tci; + req->t_ivlan_vid = l2_info->inner_vlan_tci; } enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE; - req.ethertype = htons(ETH_P_IP); + req->ethertype = htons(ETH_P_IP); if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR | CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE; - req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; - req.dst_ipaddr[0] = tun_key->u.ipv4.dst; - req.src_ipaddr[0] = tun_key->u.ipv4.src; + req->ip_addr_type = + CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4; + req->dst_ipaddr[0] = tun_key->u.ipv4.dst; + req->src_ipaddr[0] = tun_key->u.ipv4.src; } if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) { enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT; - req.dst_port = tun_key->tp_dst; + req->dst_port = tun_key->tp_dst; } /* Eventhough the decap_handle returned by hwrm_cfa_decap_filter_alloc * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16. 
*/ - req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle; - req.enables = cpu_to_le32(enables); + req->l2_ctxt_ref_id = (__force __le16)ref_decap_handle; + req->enables = cpu_to_le32(enables); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) *decap_filter_handle = resp->decap_filter_id; - } else { + hwrm_req_drop(bp, req); +exit: + if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -856,13 +862,14 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, static int hwrm_cfa_decap_filter_free(struct bnxt *bp, __le32 decap_filter_handle) { - struct hwrm_cfa_decap_filter_free_input req = { 0 }; + struct hwrm_cfa_decap_filter_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1); - req.decap_filter_id = decap_filter_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_DECAP_FILTER_FREE); + if (!rc) { + req->decap_filter_id = decap_filter_handle; + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -874,18 +881,18 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, __le32 *encap_record_handle) { - struct hwrm_cfa_encap_record_alloc_input req = { 0 }; struct hwrm_cfa_encap_record_alloc_output *resp; - struct hwrm_cfa_encap_data_vxlan *encap = - (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; - struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = - (struct hwrm_vxlan_ipv4_hdr *)encap->l3; + struct hwrm_cfa_encap_record_alloc_input *req; + struct hwrm_cfa_encap_data_vxlan *encap; + struct hwrm_vxlan_ipv4_hdr *encap_ipv4; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1); - - req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_ALLOC); + if (rc) + goto exit; + encap = (struct hwrm_cfa_encap_data_vxlan *)&req->encap_data; + req->encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN; ether_addr_copy(encap->dst_mac_addr, l2_info->dmac); ether_addr_copy(encap->src_mac_addr, l2_info->smac); if (l2_info->num_vlans) { @@ -894,6 +901,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->ovlan_tpid = l2_info->inner_vlan_tpid; } + encap_ipv4 = (struct hwrm_vxlan_ipv4_hdr *)encap->l3; encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT; encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT; encap_ipv4->ttl = encap_key->ttl; @@ -905,15 +913,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, encap->dst_port = encap_key->tp_dst; encap->vni = tunnel_id_to_key32(encap_key->tun_id); - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) { - resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send_silent(bp, req); + if (!rc) *encap_record_handle = resp->encap_record_id; - } else { + hwrm_req_drop(bp, req); +exit: + if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); - } - mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -921,13 +928,14 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, static int hwrm_cfa_encap_record_free(struct bnxt *bp, __le32 encap_record_handle) { - struct hwrm_cfa_encap_record_free_input 
req = { 0 }; + struct hwrm_cfa_encap_record_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1); - req.encap_record_id = encap_record_handle; - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_ENCAP_RECORD_FREE); + if (!rc) { + req->encap_record_id = encap_record_handle; + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc); @@ -1673,14 +1681,20 @@ static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, struct bnxt_tc_stats_batch stats_batch[]) { - struct hwrm_cfa_flow_stats_input req = { 0 }; struct hwrm_cfa_flow_stats_output *resp; - __le16 *req_flow_handles = &req.flow_handle_0; - __le32 *req_flow_ids = &req.flow_id_0; + struct hwrm_cfa_flow_stats_input *req; + __le16 *req_flow_handles; + __le32 *req_flow_ids; int rc, i; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); - req.num_flows = cpu_to_le16(num_flows); + rc = hwrm_req_init(bp, req, HWRM_CFA_FLOW_STATS); + if (rc) + goto exit; + + req_flow_handles = &req->flow_handle_0; + req_flow_ids = &req->flow_id_0; + + req->num_flows = cpu_to_le16(num_flows); for (i = 0; i < num_flows; i++) { struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; @@ -1688,13 +1702,12 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, &req_flow_handles[i], &req_flow_ids[i]); } - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { __le64 *resp_packets; __le64 *resp_bytes; - resp = bnxt_get_hwrm_resp_addr(bp, &req); resp_packets = &resp->packet_0; resp_bytes = &resp->byte_0; @@ -1704,10 +1717,11 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, stats_batch[i].hw_stats.bytes = le64_to_cpu(resp_bytes[i]); } - } else { - netdev_info(bp->dev, "error rc=%d\n", rc); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); +exit: + if (rc) + netdev_info(bp->dev, "error rc=%d\n", rc); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 187ff643ad2a..fde0c3e8ac57 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -22,6 +22,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_ulp.h" static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, @@ -237,27 +238,33 @@ static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, { struct net_device *dev = edev->net; struct bnxt *bp = netdev_priv(dev); + struct output *resp; struct input *req; + u32 resp_len; int rc; if (ulp_id != BNXT_ROCE_ULP && bp->fw_reset_state) return -EBUSY; - mutex_lock(&bp->hwrm_cmd_lock); - req = fw_msg->msg; - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); - rc = _hwrm_send_message(bp, fw_msg->msg, fw_msg->msg_len, - fw_msg->timeout); - if (!rc) { - struct output *resp = bp->hwrm_cmd_resp_addr; - u32 len = le16_to_cpu(resp->resp_len); + rc = hwrm_req_init(bp, req, 0 /* don't care */); + if (rc) + return rc; - if (fw_msg->resp_max_len < len) - len = fw_msg->resp_max_len; + rc = hwrm_req_replace(bp, req, fw_msg->msg, fw_msg->msg_len); + if (rc) + return rc; - memcpy(fw_msg->resp, resp, len); + hwrm_req_timeout(bp, req, fw_msg->timeout); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + resp_len = le16_to_cpu(resp->resp_len); + if (resp_len) { + if 
(fw_msg->resp_max_len < resp_len) + resp_len = fw_msg->resp_max_len; + + memcpy(fw_msg->resp, resp, resp_len); } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c index dd66302343a2..9401936b74fa 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c @@ -15,6 +15,7 @@ #include "bnxt_hsi.h" #include "bnxt.h" +#include "bnxt_hwrm.h" #include "bnxt_vfr.h" #include "bnxt_devlink.h" #include "bnxt_tc.h" @@ -27,38 +28,40 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx, u16 *tx_cfa_action, u16 *rx_cfa_code) { - struct hwrm_cfa_vfr_alloc_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_cfa_vfr_alloc_input req = { 0 }; + struct hwrm_cfa_vfr_alloc_output *resp; + struct hwrm_cfa_vfr_alloc_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_ALLOC, -1, -1); - req.vf_id = cpu_to_le16(vf_idx); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_ALLOC); if (!rc) { - *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); - *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); - netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", - *tx_cfa_action, *rx_cfa_code); - } else { - netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); + req->vf_id = cpu_to_le16(vf_idx); + sprintf(req->vfr_name, "vfr%d", vf_idx); + + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); + if (!rc) { + *tx_cfa_action = le16_to_cpu(resp->tx_cfa_action); + *rx_cfa_code = le16_to_cpu(resp->rx_cfa_code); + netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x", + *tx_cfa_action, *rx_cfa_code); + } + hwrm_req_drop(bp, req); } - - mutex_unlock(&bp->hwrm_cmd_lock); + if (rc) + netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; } static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) { - struct hwrm_cfa_vfr_free_input req = { 0 }; + struct hwrm_cfa_vfr_free_input *req; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_VFR_FREE, -1, -1); - sprintf(req.vfr_name, "vfr%d", vf_idx); - - rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_req_init(bp, req, HWRM_CFA_VFR_FREE); + if (!rc) { + sprintf(req->vfr_name, "vfr%d", vf_idx); + rc = hwrm_req_send(bp, req); + } if (rc) netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc); return rc; @@ -67,17 +70,18 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx) static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, u16 *max_mtu) { - struct hwrm_func_qcfg_output *resp = bp->hwrm_cmd_resp_addr; - struct hwrm_func_qcfg_input req = {0}; + struct hwrm_func_qcfg_output *resp; + struct hwrm_func_qcfg_input *req; u16 mtu; int rc; - bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QCFG, -1, -1); - req.fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); - - mutex_lock(&bp->hwrm_cmd_lock); + rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG); + if (rc) + return rc; - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + req->fid = cpu_to_le16(bp->pf.vf[vf_rep->vf_idx].fw_fid); + resp = hwrm_req_hold(bp, req); + rc = hwrm_req_send(bp, req); if (!rc) { mtu = le16_to_cpu(resp->max_mtu_configured); if (!mtu) @@ -85,7 +89,7 @@ static int bnxt_hwrm_vfr_qcfg(struct bnxt *bp, struct bnxt_vf_rep *vf_rep, else *max_mtu = mtu; } - mutex_unlock(&bp->hwrm_cmd_lock); + hwrm_req_drop(bp, 
req); return rc; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index bee6e091a997..c8083df5e0ab 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -87,7 +87,7 @@ void bnxt_tx_int_xdp(struct bnxt *bp, struct bnxt_napi *bnapi, int nr_pkts) dma_unmap_single(&pdev->dev, dma_unmap_addr(tx_buf, mapping), dma_unmap_len(tx_buf, len), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); xdp_return_frame(tx_buf->xdpf); tx_buf->action = 0; tx_buf->xdpf = NULL; diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index db74241935ab..23c7595d2a1d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -828,7 +828,9 @@ static void bcmgenet_set_msglevel(struct net_device *dev, u32 level) } static int bcmgenet_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcmgenet_priv *priv = netdev_priv(dev); struct bcmgenet_rx_ring *ring; @@ -890,7 +892,9 @@ static void bcmgenet_set_ring_rx_coalesce(struct bcmgenet_rx_ring *ring, } static int bcmgenet_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bcmgenet_priv *priv = netdev_priv(dev); unsigned int i; @@ -3659,7 +3663,7 @@ static const struct net_device_ops bcmgenet_netdev_ops = { .ndo_tx_timeout = bcmgenet_timeout, .ndo_set_rx_mode = bcmgenet_set_rx_mode, .ndo_set_mac_address = bcmgenet_set_mac_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = bcmgenet_set_features, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = bcmgenet_poll_controller, @@ -3972,8 +3976,6 @@ static int bcmgenet_probe(struct platform_device *pdev) */ dev->needed_headroom += 64; - netdev_boot_setup_check(dev); - priv->dev = dev; priv->pdev = pdev; diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c index 5b4568c2ad1c..f38f40eb966e 100644 --- a/drivers/net/ethernet/broadcom/sb1250-mac.c +++ b/drivers/net/ethernet/broadcom/sb1250-mac.c @@ -2136,7 +2136,7 @@ static const struct net_device_ops sbmac_netdev_ops = { .ndo_start_xmit = sbmac_start_tx, .ndo_set_rx_mode = sbmac_set_rx_mode, .ndo_tx_timeout = sbmac_tx_timeout, - .ndo_do_ioctl = sbmac_mii_ioctl, + .ndo_eth_ioctl = sbmac_mii_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index b0e49643f483..8a238e349e02 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -6564,10 +6564,8 @@ static void tg3_tx(struct tg3_napi *tnapi) skb_tstamp_tx(skb, &timestamp); } - pci_unmap_single(tp->pdev, - dma_unmap_addr(ri, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), + skb_headlen(skb), DMA_TO_DEVICE); ri->skb = NULL; @@ -6584,10 +6582,10 @@ static void tg3_tx(struct tg3_napi *tnapi) if (unlikely(ri->skb != NULL || sw_idx == hw_idx)) tx_bug = 1; - pci_unmap_page(tp->pdev, + dma_unmap_page(&tp->pdev->dev, dma_unmap_addr(ri, mapping), skb_frag_size(&skb_shinfo(skb)->frags[i]), - PCI_DMA_TODEVICE);
+ DMA_TO_DEVICE); while (ri->fragmented) { ri->fragmented = false; @@ -6646,8 +6644,8 @@ static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz) if (!ri->data) return; - pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping), - map_sz, PCI_DMA_FROMDEVICE); + dma_unmap_single(&tp->pdev->dev, dma_unmap_addr(ri, mapping), map_sz, + DMA_FROM_DEVICE); tg3_frag_free(skb_size <= PAGE_SIZE, ri->data); ri->data = NULL; } @@ -6711,11 +6709,9 @@ static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr, if (!data) return -ENOMEM; - mapping = pci_map_single(tp->pdev, - data + TG3_RX_OFFSET(tp), - data_size, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) { + mapping = dma_map_single(&tp->pdev->dev, data + TG3_RX_OFFSET(tp), + data_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&tp->pdev->dev, mapping))) { tg3_frag_free(skb_size <= PAGE_SIZE, data); return -EIO; } @@ -6882,8 +6878,8 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) if (skb_size < 0) goto drop_it; - pci_unmap_single(tp->pdev, dma_addr, skb_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&tp->pdev->dev, dma_addr, skb_size, + DMA_FROM_DEVICE); /* Ensure that the update to the data happens * after the usage of the old DMA mapping. @@ -6908,11 +6904,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget) goto drop_it_no_recycle; skb_reserve(skb, TG3_RAW_IP_ALIGN); - pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE); memcpy(skb->data, data + TG3_RX_OFFSET(tp), len); - pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&tp->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); } skb_put(skb, len); @@ -7762,10 +7760,8 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) skb = txb->skb; txb->skb = NULL; - pci_unmap_single(tnapi->tp->pdev, - dma_unmap_addr(txb, mapping), - skb_headlen(skb), - PCI_DMA_TODEVICE); + dma_unmap_single(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), + skb_headlen(skb), DMA_TO_DEVICE); while (txb->fragmented) { txb->fragmented = false; @@ -7779,9 +7775,9 @@ static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last) entry = NEXT_TX(entry); txb = &tnapi->tx_buffers[entry]; - pci_unmap_page(tnapi->tp->pdev, + dma_unmap_page(&tnapi->tp->pdev->dev, dma_unmap_addr(txb, mapping), - skb_frag_size(frag), PCI_DMA_TODEVICE); + skb_frag_size(frag), DMA_TO_DEVICE); while (txb->fragmented) { txb->fragmented = false; @@ -7816,10 +7812,10 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi, ret = -1; } else { /* New SKB is guaranteed to be linear. 
*/ - new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len, - PCI_DMA_TODEVICE); + new_addr = dma_map_single(&tp->pdev->dev, new_skb->data, + new_skb->len, DMA_TO_DEVICE); /* Make sure the mapping succeeded */ - if (pci_dma_mapping_error(tp->pdev, new_addr)) { + if (dma_mapping_error(&tp->pdev->dev, new_addr)) { dev_kfree_skb_any(new_skb); ret = -1; } else { @@ -8043,8 +8039,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev) len = skb_headlen(skb); - mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, mapping)) + mapping = dma_map_single(&tp->pdev->dev, skb->data, len, + DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pdev->dev, mapping)) goto drop; @@ -13499,8 +13496,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) for (i = data_off; i < tx_len; i++) tx_data[i] = (u8) (i & 0xff); - map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(tp->pdev, map)) { + map = dma_map_single(&tp->pdev->dev, skb->data, tx_len, DMA_TO_DEVICE); + if (dma_mapping_error(&tp->pdev->dev, map)) { dev_kfree_skb(skb); return -EIO; } @@ -13598,8 +13595,8 @@ static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback) } else goto out; - pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&tp->pdev->dev, map, rx_len, + DMA_FROM_DEVICE); rx_data += TG3_RX_OFFSET(tp); for (i = data_off; i < rx_len; i++, val++) { @@ -14040,7 +14037,10 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int tg3_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); @@ -14048,7 +14048,10 @@ static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } -static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int tg3_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct tg3 *tp = netdev_priv(dev); u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0; @@ -14290,7 +14293,7 @@ static const struct net_device_ops tg3_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = tg3_set_rx_mode, .ndo_set_mac_address = tg3_set_mac_addr, - .ndo_do_ioctl = tg3_ioctl, + .ndo_eth_ioctl = tg3_ioctl, .ndo_tx_timeout = tg3_tx_timeout, .ndo_change_mtu = tg3_change_mtu, .ndo_fix_features = tg3_fix_features, @@ -17755,11 +17758,11 @@ static int tg3_init_one(struct pci_dev *pdev, /* Configure DMA attributes. 
*/ if (dma_mask > DMA_BIT_MASK(32)) { - err = pci_set_dma_mask(pdev, dma_mask); + err = dma_set_mask(&pdev->dev, dma_mask); if (!err) { features |= NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, - persist_dma_mask); + err = dma_set_coherent_mask(&pdev->dev, + persist_dma_mask); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64 bit " "DMA for consistent allocations\n"); @@ -17768,7 +17771,7 @@ static int tg3_init_one(struct pci_dev *pdev, } } if (err || dma_mask == DMA_BIT_MASK(32)) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c index 265c2fa6bbe0..391b85f25141 100644 --- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c +++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c @@ -307,8 +307,10 @@ bnad_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wolinfo) wolinfo->wolopts = 0; } -static int -bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +static int bnad_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; @@ -328,8 +330,10 @@ bnad_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) return 0; } -static int -bnad_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coalesce) +static int bnad_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct bnad *bnad = netdev_priv(netdev); unsigned long flags; diff --git a/drivers/net/ethernet/cadence/Kconfig b/drivers/net/ethernet/cadence/Kconfig index e432a68ac520..5b2a461dfd28 100644 --- a/drivers/net/ethernet/cadence/Kconfig +++ b/drivers/net/ethernet/cadence/Kconfig @@ -22,6 +22,7 @@ if NET_VENDOR_CADENCE config MACB tristate "Cadence MACB/GEM support" depends on HAS_DMA && COMMON_CLK + depends on PTP_1588_CLOCK_OPTIONAL select PHYLINK select CRC32 help diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 7d2fe13a52f8..d13fb1d31821 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -3664,7 +3664,7 @@ static const struct net_device_ops macb_netdev_ops = { .ndo_start_xmit = macb_start_xmit, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_get_stats = macb_get_stats, - .ndo_do_ioctl = macb_ioctl, + .ndo_eth_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = macb_change_mtu, .ndo_set_mac_address = eth_mac_addr, @@ -4323,7 +4323,7 @@ static const struct net_device_ops at91ether_netdev_ops = { .ndo_get_stats = macb_get_stats, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = macb_ioctl, + .ndo_eth_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = at91ether_poll_controller, @@ -4533,6 +4533,14 @@ static const struct macb_config sama5d2_config = { .usrio = &macb_default_usrio, }; +static const struct macb_config sama5d29_config = { + .caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP, + .dma_burst_length = 16, + .clk_init = macb_clk_init, + .init = macb_init, + .usrio = &macb_default_usrio, +}; + 
static const struct macb_config sama5d3_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO, @@ -4610,6 +4618,7 @@ static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,gem", .data = &pc302gem_config }, { .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config }, { .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config }, + { .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config }, { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config }, { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config }, { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config }, diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c index 5c368a9cbbbc..c2e1f163bb14 100644 --- a/drivers/net/ethernet/cadence/macb_ptp.c +++ b/drivers/net/ethernet/cadence/macb_ptp.c @@ -275,6 +275,12 @@ void gem_ptp_rxstamp(struct macb *bp, struct sk_buff *skb, if (GEM_BFEXT(DMA_RXVALID, desc->addr)) { desc_ptp = macb_ptp_desc(bp, desc); + /* Unlikely but check */ + if (!desc_ptp) { + dev_warn_ratelimited(&bp->pdev->dev, + "Timestamp not supported in BD\n"); + return; + } gem_hw_timestamp(bp, desc_ptp->ts_1, desc_ptp->ts_2, &ts); memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ts.tv_nsec); @@ -307,8 +313,11 @@ int gem_ptp_txstamp(struct macb_queue *queue, struct sk_buff *skb, if (CIRC_SPACE(head, tail, PTP_TS_BUFFER_SIZE) == 0) return -ENOMEM; - skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; desc_ptp = macb_ptp_desc(queue->bp, desc); + /* Unlikely but check */ + if (!desc_ptp) + return -EINVAL; + skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; tx_timestamp = &queue->tx_timestamps[head]; tx_timestamp->skb = skb; /* ensure ts_1/ts_2 is loaded after ctrl (TX_USED check) */ diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig index 4875cdae622e..1c76c95b0b27 100644 --- a/drivers/net/ethernet/cavium/Kconfig +++ b/drivers/net/ethernet/cavium/Kconfig @@ -66,7 +66,7 @@ config LIQUIDIO tristate "Cavium LiquidIO support" depends on 64BIT && PCI depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select LIBCRC32C select NET_DEVLINK @@ -91,7 +91,7 @@ config OCTEON_MGMT_ETHERNET config LIQUIDIO_VF tristate "Cavium LiquidIO VF support" depends on 64BIT && PCI_MSI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Cavium LiquidIO Intelligent Server Adapter based on CN23XX chips. 
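For reference, the bnxt hunks earlier in this diff all follow one conversion pattern: the stack-allocated request plus hwrm_send_message()/_hwrm_send_message() under bp->hwrm_cmd_lock is replaced by the request API from bnxt_hwrm.h, where hwrm_req_init() allocates and initializes the request, hwrm_req_hold() pins the response buffer when it must be read after sending, hwrm_req_send() issues the command, and hwrm_req_drop() releases the request and response. The sketch below restates that pattern in one place; it is modelled on the bnxt_hwrm_func_qcfg_flags() conversion above, and the helper name bnxt_hwrm_query_vf_flags() with its flags out-parameter is invented for illustration only, not part of the patch.

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_hwrm.h"

/* Illustrative helper only; the request flow mirrors the converted code above. */
static int bnxt_hwrm_query_vf_flags(struct bnxt *bp, u16 fid, u16 *flags)
{
	struct hwrm_func_qcfg_output *resp;
	struct hwrm_func_qcfg_input *req;
	int rc;

	rc = hwrm_req_init(bp, req, HWRM_FUNC_QCFG);	/* allocate and init request */
	if (rc)
		return rc;

	req->fid = cpu_to_le16(fid);
	resp = hwrm_req_hold(bp, req);		/* response is read after send */
	rc = hwrm_req_send(bp, req);
	if (!rc)
		*flags = le16_to_cpu(resp->flags);
	hwrm_req_drop(bp, req);			/* release request and response */
	return rc;
}

Requests whose response is never read, such as bnxt_set_vf_mac() above, skip the hold/drop pair and simply fill in the request and call hwrm_req_send().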
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c index 66f2c553370c..2b9747867d4c 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_ethtool.c @@ -2108,7 +2108,9 @@ static int octnet_set_intrmod_cfg(struct lio *lio, } static int lio_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *intr_coal) + struct ethtool_coalesce *intr_coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); struct octeon_device *oct = lio->oct_dev; @@ -2412,7 +2414,9 @@ oct_cfg_tx_intrcnt(struct lio *lio, } static int lio_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *intr_coal) + struct ethtool_coalesce *intr_coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct lio *lio = GET_LIO(netdev); int ret; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index 591229b96257..2907e13b9df6 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1457,7 +1457,7 @@ static void free_netsgbuf(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -1500,7 +1500,7 @@ static void free_netsgbuf_with_resp(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -3223,7 +3223,7 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_do_ioctl = liquidio_ioctl, + .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, .ndo_set_vf_mac = liquidio_set_vf_mac, @@ -3750,7 +3750,8 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) } devlink = devlink_alloc(&liquidio_devlink_ops, - sizeof(struct lio_devlink_priv)); + sizeof(struct lio_devlink_priv), + &octeon_dev->pci_dev->dev); if (!devlink) { dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n"); goto setup_nic_dev_free; @@ -3759,7 +3760,7 @@ static int setup_nic_devices(struct octeon_device *octeon_dev) lio_devlink = devlink_priv(devlink); lio_devlink->oct = octeon_dev; - if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) { + if (devlink_register(devlink)) { devlink_free(devlink); dev_err(&octeon_dev->pci_dev->dev, "devlink registration failed\n"); diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c index ffddb3126a32..c6fe0f2a4d0e 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c @@ -843,7 +843,7 @@ static void free_netsgbuf(void *buf) while (frags--) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -887,7 +887,7 @@ static void free_netsgbuf_with_resp(void *buf) while (frags--) { skb_frag_t 
*frag = &skb_shinfo(skb)->frags[i - 1]; - pci_unmap_page((lio->oct_dev)->pci_dev, + dma_unmap_page(&lio->oct_dev->pci_dev->dev, g->sg[(i >> 2)].ptr[(i & 3)], skb_frag_size(frag), DMA_TO_DEVICE); i++; @@ -1889,7 +1889,7 @@ static const struct net_device_ops lionetdevops = { .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid, .ndo_change_mtu = liquidio_change_mtu, - .ndo_do_ioctl = liquidio_ioctl, + .ndo_eth_ioctl = liquidio_ioctl, .ndo_fix_features = liquidio_fix_features, .ndo_set_features = liquidio_set_features, }; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 48ff6fb0eed9..30463a6d1f8c 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1373,7 +1373,7 @@ static const struct net_device_ops octeon_mgmt_ops = { .ndo_start_xmit = octeon_mgmt_xmit, .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, .ndo_set_mac_address = octeon_mgmt_set_mac_address, - .ndo_do_ioctl = octeon_mgmt_ioctl, + .ndo_eth_ioctl = octeon_mgmt_ioctl, .ndo_change_mtu = octeon_mgmt_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = octeon_mgmt_poll_controller, diff --git a/drivers/net/ethernet/cavium/thunder/nic_main.c b/drivers/net/ethernet/cavium/thunder/nic_main.c index 9361f964bb9b..691e1475d55e 100644 --- a/drivers/net/ethernet/cavium/thunder/nic_main.c +++ b/drivers/net/ethernet/cavium/thunder/nic_main.c @@ -1322,18 +1322,12 @@ static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, "Unable to get usable DMA configuration\n"); goto err_release_regions; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); - if (err) { - dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n"); - goto err_release_regions; - } - /* MAP PF's configuration registers */ nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0); if (!nic->reg_base) { diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index 2f218fbfed06..7f2882109b16 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -456,7 +456,9 @@ static void nicvf_get_regs(struct net_device *dev, } static int nicvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nicvf *nic = netdev_priv(netdev); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index e2b290135fd9..d1667b759522 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2096,7 +2096,7 @@ static const struct net_device_ops nicvf_netdev_ops = { .ndo_fix_features = nicvf_fix_features, .ndo_set_features = nicvf_set_features, .ndo_bpf = nicvf_xdp, - .ndo_do_ioctl = nicvf_ioctl, + .ndo_eth_ioctl = nicvf_ioctl, .ndo_set_rx_mode = nicvf_set_rx_mode, }; @@ -2130,18 +2130,12 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) goto err_disable_device; } - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)); if (err) { dev_err(dev, 
"Unable to get usable DMA configuration\n"); goto err_release_regions; } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48)); - if (err) { - dev_err(dev, "unable to get 48-bit DMA for consistent allocations\n"); - goto err_release_regions; - } - qcount = netif_get_num_default_rss_queues(); /* Restrict multiqset support only for host bound VFs */ diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index 8ba0e08e5e64..c931ec8cac40 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -69,6 +69,7 @@ config CHELSIO_T3 config CHELSIO_T4 tristate "Chelsio Communications T4/T5/T6 Ethernet support" depends on PCI && (IPV6 || IPV6=n) && (TLS || TLS=n) + depends on PTP_1588_CLOCK_OPTIONAL select FW_LOADER select MDIO select ZLIB_DEFLATE diff --git a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c index 512da98019c6..73c016166f06 100644 --- a/drivers/net/ethernet/chelsio/cxgb/cxgb2.c +++ b/drivers/net/ethernet/chelsio/cxgb/cxgb2.c @@ -748,7 +748,9 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) return 0; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct adapter *adapter = dev->ml_priv; @@ -759,7 +761,9 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) return 0; } -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct adapter *adapter = dev->ml_priv; @@ -924,7 +928,7 @@ static const struct net_device_ops cxgb_netdev_ops = { .ndo_get_stats = t1_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = t1_set_rxmode, - .ndo_do_ioctl = t1_ioctl, + .ndo_eth_ioctl = t1_ioctl, .ndo_change_mtu = t1_change_mtu, .ndo_set_mac_address = t1_set_mac_addr, .ndo_fix_features = t1_fix_features, diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index 57f210c53afc..38e47703f9ab 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1996,7 +1996,9 @@ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e) return 0; } -static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2017,7 +2019,9 @@ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c) return 0; } -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2135,13 +2139,18 @@ static int in_range(int val, int lo, int hi) return val < 0 || (val <= hi && val >= lo); } -static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr) +static int cxgb_siocdevprivate(struct net_device *dev, + struct 
ifreq *ifreq, + void __user *useraddr, + int cmd) { struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; - u32 cmd; int ret; + if (cmd != SIOCCHIOCTL) + return -EOPNOTSUPP; + if (copy_from_user(&cmd, useraddr, sizeof(cmd))) return -EFAULT; @@ -2546,8 +2555,6 @@ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd) fallthrough; case SIOCGMIIPHY: return mdio_mii_ioctl(&pi->phy.mdio, data, cmd); - case SIOCCHIOCTL: - return cxgb_extension_ioctl(dev, req->ifr_data); default: return -EOPNOTSUPP; } @@ -3181,7 +3188,8 @@ static const struct net_device_ops cxgb_netdev_ops = { .ndo_get_stats = cxgb_get_stats, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = cxgb_set_rxmode, - .ndo_do_ioctl = cxgb_ioctl, + .ndo_eth_ioctl = cxgb_ioctl, + .ndo_siocdevprivate = cxgb_siocdevprivate, .ndo_change_mtu = cxgb_change_mtu, .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_fix_features = cxgb_fix_features, @@ -3231,15 +3239,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) goto out_disable_device; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_release_regions; - } - } else if ((err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) { + } else if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_release_regions; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c index cb5c79c43bc9..e21a2e691382 100644 --- a/drivers/net/ethernet/chelsio/cxgb3/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c @@ -244,8 +244,8 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, frag_idx = d->fragidx; if (frag_idx == 0 && skb_headlen(skb)) { - pci_unmap_single(pdev, be64_to_cpu(sgp->addr[0]), - skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, be64_to_cpu(sgp->addr[0]), + skb_headlen(skb), DMA_TO_DEVICE); j = 1; } @@ -253,9 +253,9 @@ static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, nfrags = skb_shinfo(skb)->nr_frags; while (frag_idx < nfrags && curflit < WR_FLITS) { - pci_unmap_page(pdev, be64_to_cpu(sgp->addr[j]), + dma_unmap_page(&pdev->dev, be64_to_cpu(sgp->addr[j]), skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); j ^= 1; if (j == 0) { sgp++; @@ -355,15 +355,14 @@ static void clear_rx_desc(struct pci_dev *pdev, const struct sge_fl *q, if (q->use_pages && d->pg_chunk.page) { (*d->pg_chunk.p_cnt)--; if (!*d->pg_chunk.p_cnt) - pci_unmap_page(pdev, - d->pg_chunk.mapping, - q->alloc_size, PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, d->pg_chunk.mapping, + q->alloc_size, DMA_FROM_DEVICE); put_page(d->pg_chunk.page); d->pg_chunk.page = NULL; } else { - pci_unmap_single(pdev, dma_unmap_addr(d, dma_addr), - q->buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, dma_unmap_addr(d, dma_addr), + q->buf_size, DMA_FROM_DEVICE); kfree_skb(d->skb); d->skb = NULL; } @@ -414,8 +413,8 @@ static inline int add_one_rx_buf(void *va, unsigned int len, { dma_addr_t mapping; - mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(pdev, mapping))) + mapping = dma_map_single(&pdev->dev, va, len, DMA_FROM_DEVICE); + if 
(unlikely(dma_mapping_error(&pdev->dev, mapping))) return -ENOMEM; dma_unmap_addr_set(sd, dma_addr, mapping); @@ -453,9 +452,9 @@ static int alloc_pg_chunk(struct adapter *adapter, struct sge_fl *q, q->pg_chunk.p_cnt = q->pg_chunk.va + (PAGE_SIZE << order) - SGE_PG_RSVD; q->pg_chunk.offset = 0; - mapping = pci_map_page(adapter->pdev, q->pg_chunk.page, - 0, q->alloc_size, PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(adapter->pdev, mapping))) { + mapping = dma_map_page(&adapter->pdev->dev, q->pg_chunk.page, + 0, q->alloc_size, DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&adapter->pdev->dev, mapping))) { __free_pages(q->pg_chunk.page, order); q->pg_chunk.page = NULL; return -EIO; @@ -522,9 +521,9 @@ nomem: q->alloc_failed++; dma_unmap_addr_set(sd, dma_addr, mapping); add_one_rx_chunk(mapping, d, q->gen); - pci_dma_sync_single_for_device(adap->pdev, mapping, - q->buf_size - SGE_PG_RSVD, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&adap->pdev->dev, mapping, + q->buf_size - SGE_PG_RSVD, + DMA_FROM_DEVICE); } else { void *buf_start; @@ -793,13 +792,13 @@ static struct sk_buff *get_packet(struct adapter *adap, struct sge_fl *fl, skb = alloc_skb(len, GFP_ATOMIC); if (likely(skb != NULL)) { __skb_put(skb, len); - pci_dma_sync_single_for_cpu(adap->pdev, - dma_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, + dma_unmap_addr(sd, dma_addr), + len, DMA_FROM_DEVICE); memcpy(skb->data, sd->skb->data, len); - pci_dma_sync_single_for_device(adap->pdev, - dma_unmap_addr(sd, dma_addr), len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&adap->pdev->dev, + dma_unmap_addr(sd, dma_addr), + len, DMA_FROM_DEVICE); } else if (!drop_thres) goto use_orig_buf; recycle: @@ -813,8 +812,8 @@ recycle: goto recycle; use_orig_buf: - pci_unmap_single(adap->pdev, dma_unmap_addr(sd, dma_addr), - fl->buf_size, PCI_DMA_FROMDEVICE); + dma_unmap_single(&adap->pdev->dev, dma_unmap_addr(sd, dma_addr), + fl->buf_size, DMA_FROM_DEVICE); skb = sd->skb; skb_put(skb, len); __refill_fl(adap, fl); @@ -854,12 +853,11 @@ static struct sk_buff *get_packet_pg(struct adapter *adap, struct sge_fl *fl, newskb = alloc_skb(len, GFP_ATOMIC); if (likely(newskb != NULL)) { __skb_put(newskb, len); - pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); memcpy(newskb->data, sd->pg_chunk.va, len); - pci_dma_sync_single_for_device(adap->pdev, dma_addr, - len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&adap->pdev->dev, dma_addr, + len, DMA_FROM_DEVICE); } else if (!drop_thres) return NULL; recycle: @@ -883,14 +881,12 @@ recycle: goto recycle; } - pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, dma_addr, len, + DMA_FROM_DEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) - pci_unmap_page(adap->pdev, - sd->pg_chunk.mapping, - fl->alloc_size, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping, + fl->alloc_size, DMA_FROM_DEVICE); if (!skb) { __skb_put(newskb, SGE_RX_PULL_LEN); memcpy(newskb->data, sd->pg_chunk.va, SGE_RX_PULL_LEN); @@ -968,9 +964,9 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, const struct skb_shared_info *si; if (skb_headlen(skb)) { - *addr = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, *addr)) + *addr = 
dma_map_single(&pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, *addr)) goto out_err; addr++; } @@ -981,7 +977,7 @@ static int map_skb(struct pci_dev *pdev, const struct sk_buff *skb, for (fp = si->frags; fp < end; fp++) { *addr = skb_frag_dma_map(&pdev->dev, fp, 0, skb_frag_size(fp), DMA_TO_DEVICE); - if (pci_dma_mapping_error(pdev, *addr)) + if (dma_mapping_error(&pdev->dev, *addr)) goto unwind; addr++; } @@ -992,7 +988,8 @@ unwind: dma_unmap_page(&pdev->dev, *--addr, skb_frag_size(fp), DMA_TO_DEVICE); - pci_unmap_single(pdev, addr[-1], skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, addr[-1], skb_headlen(skb), + DMA_TO_DEVICE); out_err: return -ENOMEM; } @@ -1592,13 +1589,14 @@ static void deferred_unmap_destructor(struct sk_buff *skb) p = dui->addr; if (skb_tail_pointer(skb) - skb_transport_header(skb)) - pci_unmap_single(dui->pdev, *p++, skb_tail_pointer(skb) - - skb_transport_header(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&dui->pdev->dev, *p++, + skb_tail_pointer(skb) - skb_transport_header(skb), + DMA_TO_DEVICE); si = skb_shinfo(skb); for (i = 0; i < si->nr_frags; i++) - pci_unmap_page(dui->pdev, *p++, skb_frag_size(&si->frags[i]), - PCI_DMA_TODEVICE); + dma_unmap_page(&dui->pdev->dev, *p++, + skb_frag_size(&si->frags[i]), DMA_TO_DEVICE); } static void setup_deferred_unmapping(struct sk_buff *skb, struct pci_dev *pdev, @@ -2153,17 +2151,14 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs, fl->credits--; - pci_dma_sync_single_for_cpu(adap->pdev, - dma_unmap_addr(sd, dma_addr), - fl->buf_size - SGE_PG_RSVD, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&adap->pdev->dev, + dma_unmap_addr(sd, dma_addr), + fl->buf_size - SGE_PG_RSVD, DMA_FROM_DEVICE); (*sd->pg_chunk.p_cnt)--; if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page) - pci_unmap_page(adap->pdev, - sd->pg_chunk.mapping, - fl->alloc_size, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&adap->pdev->dev, sd->pg_chunk.mapping, + fl->alloc_size, DMA_FROM_DEVICE); if (!skb) { put_page(sd->pg_chunk.page); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c index 83ed10ac8660..5903bdb78916 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_ethtool.c @@ -1147,7 +1147,9 @@ static int set_dbqtimer_tickval(struct net_device *dev, } static int set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { int ret; @@ -1163,7 +1165,9 @@ static int set_coalesce(struct net_device *dev, coalesce->tx_coalesce_usecs); } -static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) +static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); const struct adapter *adap = pi->adapter; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 6260b3bebd2b..786ceae34488 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1441,7 +1441,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev, } else if (iconf & USE_ENC_IDX_F) { if (f->fs.val.encap_vld) { struct port_info *pi = netdev_priv(f->dev); - u8 
match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; /* allocate MPS TCAM entry */ ret = t4_alloc_encap_mac_filt(adapter, pi->viid, @@ -1688,7 +1688,7 @@ int __cxgb4_set_filter(struct net_device *dev, int ftid, } else if (iconf & USE_ENC_IDX_F) { if (f->fs.val.encap_vld) { struct port_info *pi = netdev_priv(f->dev); - u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; + static const u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 }; /* allocate MPS TCAM entry */ ret = t4_alloc_encap_mac_filt(adapter, pi->viid, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index dbf9a0e6601d..0d9cda4ab303 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -3872,7 +3872,7 @@ static const struct net_device_ops cxgb4_netdev_ops = { .ndo_set_mac_address = cxgb_set_mac_addr, .ndo_set_features = cxgb_set_features, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb_ioctl, + .ndo_eth_ioctl = cxgb_ioctl, .ndo_change_mtu = cxgb_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cxgb_netpoll, @@ -4008,7 +4008,7 @@ static void adap_free_hma_mem(struct adapter *adapter) if (adapter->hma.flags & HMA_DMA_MAPPED_FLAG) { dma_unmap_sg(adapter->pdev_dev, adapter->hma.sgt->sgl, - adapter->hma.sgt->nents, PCI_DMA_BIDIRECTIONAL); + adapter->hma.sgt->nents, DMA_BIDIRECTIONAL); adapter->hma.flags &= ~HMA_DMA_MAPPED_FLAG; } @@ -5068,6 +5068,7 @@ static int adap_init0(struct adapter *adap, int vpd_skip) ret = -ENOMEM; goto bye; } + bitmap_zero(adap->sge.blocked_fl, adap->sge.egr_sz); #endif params[0] = FW_PARAM_PFVF(CLIP_START); @@ -6162,8 +6163,7 @@ static void print_port_info(const struct net_device *dev) --bufp; sprintf(bufp, "BASE-%s", t4_get_port_type_description(pi->port_type)); - netdev_info(dev, "%s: Chelsio %s (%s) %s\n", - dev->name, adap->params.vpd.id, adap->name, buf); + netdev_info(dev, "Chelsio %s %s\n", adap->params.vpd.id, buf); } /* @@ -6687,16 +6687,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) return 0; } - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { highdma = true; - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for " - "coherent allocations\n"); - goto out_free_adapter; - } } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto out_free_adapter; @@ -6788,13 +6782,11 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) setup_memwin(adapter); err = adap_init0(adapter, 0); -#ifdef CONFIG_DEBUG_FS - bitmap_zero(adapter->sge.blocked_fl, adapter->sge.egr_sz); -#endif - setup_memwin_rdma(adapter); if (err) goto out_unmap_bar; + setup_memwin_rdma(adapter); + /* configure SGE_STAT_CFG_A to read WC stats */ if (!is_t4(adapter->params.chip)) t4_write_reg(adapter, SGE_STAT_CFG_A, STATSOURCE_T5_V(7) | diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c index 6a099cb34b12..fa5b596ff23a 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c @@ -443,7 +443,7 @@ static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n) if (is_buf_mapped(d)) dma_unmap_page(adap->pdev_dev, get_buf_addr(d), get_buf_size(adap, d), 
- PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); put_page(d->page); d->page = NULL; if (++q->cidx == q->size) @@ -469,7 +469,7 @@ static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q) if (is_buf_mapped(d)) dma_unmap_page(adap->pdev_dev, get_buf_addr(d), - get_buf_size(adap, d), PCI_DMA_FROMDEVICE); + get_buf_size(adap, d), DMA_FROM_DEVICE); d->page = NULL; if (++q->cidx == q->size) q->cidx = 0; @@ -566,7 +566,7 @@ static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n, mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE << s->fl_pg_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { __free_pages(pg, s->fl_pg_order); q->mapping_err++; @@ -596,7 +596,7 @@ alloc_small_pages: } mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) { put_page(pg); q->mapping_err++; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2820a0bb971b..49b76fd47daa 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -1647,7 +1647,9 @@ static int cxgb4vf_set_ringparam(struct net_device *dev, * interrupt holdoff timer to be read on all of the device's Queue Sets. */ static int cxgb4vf_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); const struct adapter *adapter = pi->adapter; @@ -1667,7 +1669,9 @@ static int cxgb4vf_get_coalesce(struct net_device *dev, * the interrupt holdoff timer on any of the device's Queue Sets. */ static int cxgb4vf_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const struct port_info *pi = netdev_priv(dev); struct adapter *adapter = pi->adapter; @@ -2837,7 +2841,7 @@ static const struct net_device_ops cxgb4vf_netdev_ops = { .ndo_set_rx_mode = cxgb4vf_set_rxmode, .ndo_set_mac_address = cxgb4vf_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = cxgb4vf_do_ioctl, + .ndo_eth_ioctl = cxgb4vf_do_ioctl, .ndo_change_mtu = cxgb4vf_change_mtu, .ndo_fix_features = cxgb4vf_fix_features, .ndo_set_features = cxgb4vf_set_features, @@ -2917,17 +2921,11 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, * Set up our DMA mask: try for 64-bit address masking first and * fall back to 32-bit if we can't get 64 bits ... 
*/ - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err == 0) { - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_err(&pdev->dev, "unable to obtain 64-bit DMA for" - " coherent allocations\n"); - goto err_release_regions; - } pci_using_dac = 1; } else { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err != 0) { dev_err(&pdev->dev, "no usable DMA configuration\n"); goto err_release_regions; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c index 7bc80eeb2c21..0295b2406646 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c @@ -478,7 +478,7 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), get_buf_size(adapter, sdesc), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); put_page(sdesc->page); sdesc->page = NULL; if (++fl->cidx == fl->size) @@ -507,7 +507,7 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl) if (is_buf_mapped(sdesc)) dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), get_buf_size(adapter, sdesc), - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); sdesc->page = NULL; if (++fl->cidx == fl->size) fl->cidx = 0; @@ -644,7 +644,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE << s->fl_pg_order, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { /* * We've run out of DMA mapping space. Free up the @@ -682,7 +682,7 @@ alloc_small_pages: poison_buf(page, PAGE_SIZE); dma_addr = dma_map_page(adapter->pdev_dev, page, 0, PAGE_SIZE, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { put_page(page); break; diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index d8af9e64dd1e..dac1764ba740 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -6,7 +6,7 @@ config NET_VENDOR_CIRRUS bool "Cirrus devices" default y - depends on ISA || EISA || ARM || MAC + depends on ISA || EISA || ARM || MAC || COMPILE_TEST help If you have a network (Ethernet) card belonging to this class, say Y. @@ -18,9 +18,16 @@ config NET_VENDOR_CIRRUS if NET_VENDOR_CIRRUS config CS89x0 - tristate "CS89x0 support" - depends on ISA || EISA || ARM + tristate + +config CS89x0_ISA + tristate "CS89x0 ISA driver support" + depends on HAS_IOPORT_MAP + depends on ISA depends on !PPC32 + depends on CS89x0_PLATFORM=n + select NETDEV_LEGACY_INIT + select CS89x0 help Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the file @@ -30,15 +37,15 @@ config CS89x0 will be called cs89x0. config CS89x0_PLATFORM - bool "CS89x0 platform driver support" if HAS_IOPORT_MAP - default !HAS_IOPORT_MAP - depends on CS89x0 + tristate "CS89x0 platform driver support" + depends on ARM || COMPILE_TEST + select CS89x0 help - Say Y to compile the cs89x0 driver as a platform driver. This - makes this driver suitable for use on certain evaluation boards - such as the iMX21ADS. + Say Y to compile the cs89x0 platform driver. This makes this driver + suitable for use on certain evaluation boards such as the iMX21ADS. - If you are unsure, say N. 
+ To compile this driver as a module, choose M here. The module + will be called cs89x0. config EP93XX_ETH tristate "EP93xx Ethernet support" diff --git a/drivers/net/ethernet/cirrus/cs89x0.c b/drivers/net/ethernet/cirrus/cs89x0.c index 33ace3307059..d0c4c8b7a15a 100644 --- a/drivers/net/ethernet/cirrus/cs89x0.c +++ b/drivers/net/ethernet/cirrus/cs89x0.c @@ -104,7 +104,7 @@ static char version[] __initdata = * them to system IRQ numbers. This mapping is card specific and is set to * the configuration of the Cirrus Eval board for this chip. */ -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) static unsigned int netcard_portlist[] __used __initdata = { 0x300, 0x320, 0x340, 0x360, 0x200, 0x220, 0x240, 0x260, 0x280, 0x2a0, 0x2c0, 0x2e0, 0 @@ -292,7 +292,7 @@ write_irq(struct net_device *dev, int chip_type, int irq) int i; if (chip_type == CS8900) { -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) /* Search the mapping table for the corresponding IRQ pin. */ for (i = 0; i != ARRAY_SIZE(cs8900_irq_map); i++) if (cs8900_irq_map[i] == irq) @@ -859,7 +859,7 @@ net_open(struct net_device *dev) goto bad_out; } } else { -#if !defined(CONFIG_CS89x0_PLATFORM) +#if IS_ENABLED(CONFIG_CS89x0_ISA) if (((1 << dev->irq) & lp->irq_map) == 0) { pr_err("%s: IRQ %d is not in our map of allowable IRQs, which is %x\n", dev->name, dev->irq, lp->irq_map); @@ -1523,7 +1523,7 @@ cs89x0_probe1(struct net_device *dev, void __iomem *ioaddr, int modular) dev->irq = i; } else { i = lp->isa_config & INT_NO_MASK; -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) if (lp->chip_type == CS8900) { /* Translate the IRQ using the IRQ mapping table. */ if (i >= ARRAY_SIZE(cs8900_irq_map)) @@ -1576,7 +1576,7 @@ out1: return retval; } -#ifndef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_ISA) /* * This function converts the I/O port address used by the cs89x0_probe() and * init_module() functions to the I/O memory address used by the @@ -1682,11 +1682,7 @@ out: pr_warn("no cs8900 or cs8920 detected. 
Be sure to disable PnP with SETUP\n"); return ERR_PTR(err); } -#endif -#endif - -#if defined(MODULE) && !defined(CONFIG_CS89x0_PLATFORM) - +#else static struct net_device *dev_cs89x0; /* Support the 'debug' module parm even if we're compiled for non-debug to @@ -1757,9 +1753,9 @@ MODULE_LICENSE("GPL"); * (hw or software util) */ -int __init init_module(void) +static int __init cs89x0_isa_init_module(void) { - struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); + struct net_device *dev; struct net_local *lp; int ret = 0; @@ -1768,6 +1764,7 @@ int __init init_module(void) #else debug = 0; #endif + dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; @@ -1826,9 +1823,9 @@ out: free_netdev(dev); return ret; } +module_init(cs89x0_isa_init_module); -void __exit -cleanup_module(void) +static void __exit cs89x0_isa_cleanup_module(void) { struct net_local *lp = netdev_priv(dev_cs89x0); @@ -1838,9 +1835,11 @@ cleanup_module(void) release_region(dev_cs89x0->base_addr, NETCARD_IO_EXTENT); free_netdev(dev_cs89x0); } -#endif /* MODULE && !CONFIG_CS89x0_PLATFORM */ +module_exit(cs89x0_isa_cleanup_module); +#endif /* MODULE */ +#endif /* CONFIG_CS89x0_ISA */ -#ifdef CONFIG_CS89x0_PLATFORM +#if IS_ENABLED(CONFIG_CS89x0_PLATFORM) static int __init cs89x0_platform_probe(struct platform_device *pdev) { struct net_device *dev = alloc_etherdev(sizeof(struct net_local)); diff --git a/drivers/net/ethernet/cirrus/ep93xx_eth.c b/drivers/net/ethernet/cirrus/ep93xx_eth.c index 9f5e5ec69991..072fac5f5d24 100644 --- a/drivers/net/ethernet/cirrus/ep93xx_eth.c +++ b/drivers/net/ethernet/cirrus/ep93xx_eth.c @@ -733,7 +733,7 @@ static const struct net_device_ops ep93xx_netdev_ops = { .ndo_open = ep93xx_open, .ndo_stop = ep93xx_close, .ndo_start_xmit = ep93xx_xmit, - .ndo_do_ioctl = ep93xx_ioctl, + .ndo_eth_ioctl = ep93xx_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index 1a9803f2073e..12ffc14fbecd 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -298,7 +298,9 @@ static void enic_set_msglevel(struct net_device *netdev, u32 value) } static int enic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enic *enic = netdev_priv(netdev); struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting; @@ -343,7 +345,9 @@ static int enic_coalesce_valid(struct enic *enic, } static int enic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enic *enic = netdev_priv(netdev); u32 tx_coalesce_usecs; diff --git a/drivers/net/ethernet/cortina/gemini.c b/drivers/net/ethernet/cortina/gemini.c index c2ebb3388789..6e745ca4c433 100644 --- a/drivers/net/ethernet/cortina/gemini.c +++ b/drivers/net/ethernet/cortina/gemini.c @@ -2144,7 +2144,9 @@ static int gmac_set_ringparam(struct net_device *netdev, } static int gmac_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); @@ -2156,7 +2158,9 @@ static int gmac_get_coalesce(struct 
net_device *netdev, } static int gmac_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gemini_ethernet_port *port = netdev_priv(netdev); diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index 2a8bf53c2f75..e842de6f6635 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1372,7 +1372,7 @@ static const struct net_device_ops dm9000_netdev_ops = { .ndo_start_xmit = dm9000_start_xmit, .ndo_tx_timeout = dm9000_timeout, .ndo_set_rx_mode = dm9000_hash_table, - .ndo_do_ioctl = dm9000_ioctl, + .ndo_eth_ioctl = dm9000_ioctl, .ndo_set_features = dm9000_set_features, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c index b125d7faefdf..36ab4cbf2ad0 100644 --- a/drivers/net/ethernet/dec/tulip/de4x5.c +++ b/drivers/net/ethernet/dec/tulip/de4x5.c @@ -443,6 +443,7 @@ ========================================================================= */ +#include <linux/compat.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> @@ -902,7 +903,8 @@ static int de4x5_close(struct net_device *dev); static struct net_device_stats *de4x5_get_stats(struct net_device *dev); static void de4x5_local_stats(struct net_device *dev, char *buf, int pkt_len); static void set_multicast_list(struct net_device *dev); -static int de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd); /* ** Private functions @@ -1084,7 +1086,7 @@ static const struct net_device_ops de4x5_netdev_ops = { .ndo_start_xmit = de4x5_queue_pkt, .ndo_get_stats = de4x5_get_stats, .ndo_set_rx_mode = set_multicast_list, - .ndo_do_ioctl = de4x5_ioctl, + .ndo_siocdevprivate = de4x5_siocdevprivate, .ndo_set_mac_address= eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -5357,7 +5359,7 @@ de4x5_dbg_rx(struct sk_buff *skb, int len) ** this function is only used for my testing. */ static int -de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +de4x5_siocdevprivate(struct net_device *dev, struct ifreq *rq, void __user *data, int cmd) { struct de4x5_private *lp = netdev_priv(dev); struct de4x5_ioctl *ioc = (struct de4x5_ioctl *) &rq->ifr_ifru; @@ -5371,6 +5373,9 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) } tmp; u_long flags = 0; + if (cmd != SIOCDEVPRIVATE || in_compat_syscall()) + return -EOPNOTSUPP; + switch(ioc->cmd) { case DE4X5_GET_HWADDR: /* Get the hardware address */ ioc->len = ETH_ALEN; diff --git a/drivers/net/ethernet/dec/tulip/media.c b/drivers/net/ethernet/dec/tulip/media.c index 011604787b8e..55d6fc99f40b 100644 --- a/drivers/net/ethernet/dec/tulip/media.c +++ b/drivers/net/ethernet/dec/tulip/media.c @@ -362,7 +362,7 @@ void tulip_select_media(struct net_device *dev, int startup) iowrite32(0x33, ioaddr + CSR12); new_csr6 = 0x01860000; /* Trigger autonegotiation. */ - iowrite32(startup ? 
0x0201F868 : 0x0001F868, ioaddr + 0xB8); + iowrite32(0x0001F868, ioaddr + 0xB8); } else { iowrite32(0x32, ioaddr + CSR12); new_csr6 = 0x00420000; diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c index c1dcd6ca1457..fcedd733bacb 100644 --- a/drivers/net/ethernet/dec/tulip/tulip_core.c +++ b/drivers/net/ethernet/dec/tulip/tulip_core.c @@ -1271,7 +1271,7 @@ static const struct net_device_ops tulip_netdev_ops = { .ndo_tx_timeout = tulip_tx_timeout, .ndo_stop = tulip_close, .ndo_get_stats = tulip_get_stats, - .ndo_do_ioctl = private_ioctl, + .ndo_eth_ioctl = private_ioctl, .ndo_set_rx_mode = set_rx_mode, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/dec/tulip/winbond-840.c b/drivers/net/ethernet/dec/tulip/winbond-840.c index 1876f15dd827..85b99099c6b9 100644 --- a/drivers/net/ethernet/dec/tulip/winbond-840.c +++ b/drivers/net/ethernet/dec/tulip/winbond-840.c @@ -341,7 +341,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index 734acb834c98..202ecb132053 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -95,7 +95,7 @@ static const struct net_device_ops netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = set_multicast, - .ndo_do_ioctl = rio_ioctl, + .ndo_eth_ioctl = rio_ioctl, .ndo_tx_timeout = rio_tx_timeout, }; diff --git a/drivers/net/ethernet/dlink/sundance.c b/drivers/net/ethernet/dlink/sundance.c index ee0ca712dd1c..c36d186dffed 100644 --- a/drivers/net/ethernet/dlink/sundance.c +++ b/drivers/net/ethernet/dlink/sundance.c @@ -479,7 +479,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = tx_timeout, .ndo_change_mtu = change_mtu, .ndo_set_mac_address = sundance_set_mac_addr, diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index 48c6eb142dcc..6c51cf991dad 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -742,7 +742,7 @@ static const struct net_device_ops dnet_netdev_ops = { .ndo_stop = dnet_close, .ndo_get_stats = dnet_get_stats, .ndo_start_xmit = dnet_start_xmit, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/ec_bhf.c b/drivers/net/ethernet/ec_bhf.c index 7c992172933b..b2d4fb3feb74 100644 --- a/drivers/net/ethernet/ec_bhf.c +++ b/drivers/net/ethernet/ec_bhf.c @@ -488,15 +488,7 @@ static int ec_bhf_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_master(dev); - err = pci_set_dma_mask(dev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&dev->dev, - "Required dma mask not supported, failed to initialize device\n"); - err = -EIO; - goto err_disable_dev; - } - - err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&dev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&dev->dev, "Required dma mask not supported, failed to 
initialize device\n"); diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c index 99cc1c46fb30..f9955308b93d 100644 --- a/drivers/net/ethernet/emulex/benet/be_ethtool.c +++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c @@ -315,7 +315,9 @@ static int be_read_dump_data(struct be_adapter *adapter, u32 dump_len, } static int be_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *et) + struct ethtool_coalesce *et, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct be_adapter *adapter = netdev_priv(netdev); struct be_aic_obj *aic = &adapter->aic_obj[0]; @@ -338,7 +340,9 @@ static int be_get_coalesce(struct net_device *netdev, * eqd cmd is issued in the worker thread. */ static int be_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *et) + struct ethtool_coalesce *et, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct be_adapter *adapter = netdev_priv(netdev); struct be_aic_obj *aic = &adapter->aic_obj[0]; diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c index e1b43b07755b..ed1ed48e7483 100644 --- a/drivers/net/ethernet/ethoc.c +++ b/drivers/net/ethernet/ethoc.c @@ -1009,7 +1009,7 @@ static const struct ethtool_ops ethoc_ethtool_ops = { static const struct net_device_ops ethoc_netdev_ops = { .ndo_open = ethoc_open, .ndo_stop = ethoc_stop, - .ndo_do_ioctl = ethoc_ioctl, + .ndo_eth_ioctl = ethoc_ioctl, .ndo_set_mac_address = ethoc_set_mac_address, .ndo_set_rx_mode = ethoc_set_multicast_list, .ndo_change_mtu = ethoc_change_mtu, diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c index 11dbbfd38770..ff76e401a014 100644 --- a/drivers/net/ethernet/faraday/ftgmac100.c +++ b/drivers/net/ethernet/faraday/ftgmac100.c @@ -1616,7 +1616,7 @@ static const struct net_device_ops ftgmac100_netdev_ops = { .ndo_start_xmit = ftgmac100_hard_start_xmit, .ndo_set_mac_address = ftgmac100_set_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = ftgmac100_tx_timeout, .ndo_set_rx_mode = ftgmac100_set_rx_mode, .ndo_set_features = ftgmac100_set_features, diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c index 5a1a8f2ea63c..8a341e2d5833 100644 --- a/drivers/net/ethernet/faraday/ftmac100.c +++ b/drivers/net/ethernet/faraday/ftmac100.c @@ -1043,7 +1043,7 @@ static const struct net_device_ops ftmac100_netdev_ops = { .ndo_start_xmit = ftmac100_hard_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = ftmac100_do_ioctl, + .ndo_eth_ioctl = ftmac100_do_ioctl, }; /****************************************************************************** diff --git a/drivers/net/ethernet/fealnx.c b/drivers/net/ethernet/fealnx.c index 0f141c14d72d..25c91b3c5fd3 100644 --- a/drivers/net/ethernet/fealnx.c +++ b/drivers/net/ethernet/fealnx.c @@ -463,7 +463,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = mii_ioctl, + .ndo_eth_ioctl = mii_ioctl, .ndo_tx_timeout = fealnx_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig index 2d1abdd58fab..e04e1c5cb013 100644 --- a/drivers/net/ethernet/freescale/Kconfig 
+++ b/drivers/net/ethernet/freescale/Kconfig @@ -25,10 +25,10 @@ config FEC depends on (M523x || M527x || M5272 || M528x || M520x || M532x || \ ARCH_MXC || SOC_IMX28 || COMPILE_TEST) default ARCH_MXC || SOC_IMX28 if ARM + depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select PHYLIB imply NET_SELFTESTS - imply PTP_1588_CLOCK help Say Y here if you want to use the built-in 10/100 Fast ethernet controller on some Motorola ColdFire and Freescale i.MX processors. diff --git a/drivers/net/ethernet/freescale/dpaa/Kconfig b/drivers/net/ethernet/freescale/dpaa/Kconfig index 626ec58a0afc..0e1439fd00bd 100644 --- a/drivers/net/ethernet/freescale/dpaa/Kconfig +++ b/drivers/net/ethernet/freescale/dpaa/Kconfig @@ -4,7 +4,6 @@ menuconfig FSL_DPAA_ETH depends on FSL_DPAA && FSL_FMAN select PHYLIB select FIXED_PHY - select FSL_FMAN_MAC help Data Path Acceleration Architecture Ethernet driver, supporting the Freescale QorIQ chips. diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index e6826561cf11..685d2d8a3b36 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -3157,7 +3157,7 @@ static const struct net_device_ops dpaa_ops = { .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, - .ndo_do_ioctl = dpaa_ioctl, + .ndo_eth_ioctl = dpaa_ioctl, .ndo_setup_tc = dpaa_setup_tc, .ndo_change_mtu = dpaa_change_mtu, .ndo_bpf = dpaa_xdp, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 1268996b7030..763d2c7b5fb1 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -513,7 +513,9 @@ static int dpaa_get_ts_info(struct net_device *net_dev, } static int dpaa_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qman_portal *portal; u32 period; @@ -530,7 +532,9 @@ static int dpaa_get_coalesce(struct net_device *dev, } static int dpaa_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { const cpumask_t *cpus = qman_affine_cpus(); bool needs_revert[NR_CPUS] = {false}; diff --git a/drivers/net/ethernet/freescale/dpaa2/Makefile b/drivers/net/ethernet/freescale/dpaa2/Makefile index c2ef74052ef8..3d9842af7f10 100644 --- a/drivers/net/ethernet/freescale/dpaa2/Makefile +++ b/drivers/net/ethernet/freescale/dpaa2/Makefile @@ -11,7 +11,7 @@ fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o dpaa2-mac.o dpmac.o dpa fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DCB} += dpaa2-eth-dcb.o fsl-dpaa2-eth-${CONFIG_DEBUG_FS} += dpaa2-eth-debugfs.o fsl-dpaa2-ptp-objs := dpaa2-ptp.o dprtc.o -fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o +fsl-dpaa2-switch-objs := dpaa2-switch.o dpaa2-switch-ethtool.o dpsw.o dpaa2-switch-flower.o dpaa2-mac.o dpmac.o # Needed by the tracing framework CFLAGS_dpaa2-eth.o := -I$(src) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c index 833696245565..605a39f892b9 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth-devlink.c @@ -68,7 +68,7 @@ 
dpaa2_eth_dl_trap_item_lookup(struct dpaa2_eth_priv *priv, u16 trap_id) struct dpaa2_eth_trap_item *dpaa2_eth_dl_get_trap(struct dpaa2_eth_priv *priv, struct dpaa2_fapr *fapr) { - struct dpaa2_faf_error_bit { + static const struct dpaa2_faf_error_bit { int position; enum devlink_trap_generic_id trap_id; } faf_bits[] = { @@ -196,7 +196,8 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) struct dpaa2_eth_devlink_priv *dl_priv; int err; - priv->devlink = devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv)); + priv->devlink = + devlink_alloc(&dpaa2_eth_devlink_ops, sizeof(*dl_priv), dev); if (!priv->devlink) { dev_err(dev, "devlink_alloc failed\n"); return -ENOMEM; @@ -204,7 +205,7 @@ int dpaa2_eth_dl_register(struct dpaa2_eth_priv *priv) dl_priv = devlink_priv(priv->devlink); dl_priv->dpaa2_priv = priv; - err = devlink_register(priv->devlink, dev); + err = devlink_register(priv->devlink); if (err) { dev_err(dev, "devlink_register() = %d\n", err); goto devlink_free; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 973352393bd4..7065c71ed7b8 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -2594,7 +2594,7 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_get_stats64 = dpaa2_eth_get_stats, .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, .ndo_set_features = dpaa2_eth_set_features, - .ndo_do_ioctl = dpaa2_eth_ioctl, + .ndo_eth_ioctl = dpaa2_eth_ioctl, .ndo_change_mtu = dpaa2_eth_change_mtu, .ndo_bpf = dpaa2_eth_xdp, .ndo_xdp_xmit = dpaa2_eth_xdp_xmit, @@ -4138,7 +4138,7 @@ static int dpaa2_eth_connect_mac(struct dpaa2_eth_priv *priv) int err; dpni_dev = to_fsl_mc_device(priv->net_dev->dev.parent); - dpmac_dev = fsl_mc_get_endpoint(dpni_dev); + dpmac_dev = fsl_mc_get_endpoint(dpni_dev, 0); if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) return PTR_ERR(dpmac_dev); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index ad5e374eeccf..2da5f881f630 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -72,12 +72,12 @@ static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, { struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); + strscpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%u.%u", priv->dpni_ver_major, priv->dpni_ver_minor); - strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), + strscpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), sizeof(drvinfo->bus_info)); } @@ -191,11 +191,11 @@ static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, switch (stringset) { case ETH_SS_STATS: for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { - strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); + strscpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { - strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); + strscpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } if (dpaa2_eth_has_mac(priv)) diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c index 70e04321c420..720c9230cab5 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c +++ 
b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-ethtool.c @@ -15,18 +15,18 @@ static struct { enum dpsw_counter id; char name[ETH_GSTRING_LEN]; } dpaa2_switch_ethtool_counters[] = { - {DPSW_CNT_ING_FRAME, "rx frames"}, - {DPSW_CNT_ING_BYTE, "rx bytes"}, - {DPSW_CNT_ING_FLTR_FRAME, "rx filtered frames"}, - {DPSW_CNT_ING_FRAME_DISCARD, "rx discarded frames"}, - {DPSW_CNT_ING_BCAST_FRAME, "rx b-cast frames"}, - {DPSW_CNT_ING_BCAST_BYTES, "rx b-cast bytes"}, - {DPSW_CNT_ING_MCAST_FRAME, "rx m-cast frames"}, - {DPSW_CNT_ING_MCAST_BYTE, "rx m-cast bytes"}, - {DPSW_CNT_EGR_FRAME, "tx frames"}, - {DPSW_CNT_EGR_BYTE, "tx bytes"}, - {DPSW_CNT_EGR_FRAME_DISCARD, "tx discarded frames"}, - {DPSW_CNT_ING_NO_BUFF_DISCARD, "rx discarded no buffer frames"}, + {DPSW_CNT_ING_FRAME, "[hw] rx frames"}, + {DPSW_CNT_ING_BYTE, "[hw] rx bytes"}, + {DPSW_CNT_ING_FLTR_FRAME, "[hw] rx filtered frames"}, + {DPSW_CNT_ING_FRAME_DISCARD, "[hw] rx discarded frames"}, + {DPSW_CNT_ING_BCAST_FRAME, "[hw] rx bcast frames"}, + {DPSW_CNT_ING_BCAST_BYTES, "[hw] rx bcast bytes"}, + {DPSW_CNT_ING_MCAST_FRAME, "[hw] rx mcast frames"}, + {DPSW_CNT_ING_MCAST_BYTE, "[hw] rx mcast bytes"}, + {DPSW_CNT_EGR_FRAME, "[hw] tx frames"}, + {DPSW_CNT_EGR_BYTE, "[hw] tx bytes"}, + {DPSW_CNT_EGR_FRAME_DISCARD, "[hw] tx discarded frames"}, + {DPSW_CNT_ING_NO_BUFF_DISCARD, "[hw] rx nobuffer discards"}, }; #define DPAA2_SWITCH_NUM_COUNTERS ARRAY_SIZE(dpaa2_switch_ethtool_counters) @@ -62,6 +62,10 @@ dpaa2_switch_get_link_ksettings(struct net_device *netdev, struct dpsw_link_state state = {0}; int err = 0; + if (dpaa2_switch_port_is_type_phy(port_priv)) + return phylink_ethtool_ksettings_get(port_priv->mac->phylink, + link_ksettings); + err = dpsw_if_get_link_state(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, port_priv->idx, @@ -95,6 +99,10 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, bool if_running; int err = 0, ret; + if (dpaa2_switch_port_is_type_phy(port_priv)) + return phylink_ethtool_ksettings_set(port_priv->mac->phylink, + link_ksettings); + /* Interface needs to be down to change link settings */ if_running = netif_running(netdev); if (if_running) { @@ -134,11 +142,17 @@ dpaa2_switch_set_link_ksettings(struct net_device *netdev, return err; } -static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset) +static int +dpaa2_switch_ethtool_get_sset_count(struct net_device *netdev, int sset) { + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + int num_ss_stats = DPAA2_SWITCH_NUM_COUNTERS; + switch (sset) { case ETH_SS_STATS: - return DPAA2_SWITCH_NUM_COUNTERS; + if (port_priv->mac) + num_ss_stats += dpaa2_mac_get_sset_count(); + return num_ss_stats; default: return -EOPNOTSUPP; } @@ -147,14 +161,19 @@ static int dpaa2_switch_ethtool_get_sset_count(struct net_device *dev, int sset) static void dpaa2_switch_ethtool_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { + struct ethsw_port_priv *port_priv = netdev_priv(netdev); + u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: - for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) - memcpy(data + i * ETH_GSTRING_LEN, - dpaa2_switch_ethtool_counters[i].name, + for (i = 0; i < DPAA2_SWITCH_NUM_COUNTERS; i++) { + memcpy(p, dpaa2_switch_ethtool_counters[i].name, ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + if (port_priv->mac) + dpaa2_mac_get_strings(p); break; } } @@ -176,6 +195,9 @@ static void dpaa2_switch_ethtool_get_stats(struct net_device *netdev, netdev_err(netdev, "dpsw_if_get_counter[%s] err %d\n", 
dpaa2_switch_ethtool_counters[i].name, err); } + + if (port_priv->mac) + dpaa2_mac_get_ethtool_stats(port_priv->mac, data + i); } const struct ethtool_ops dpaa2_switch_port_ethtool_ops = { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c index f9451ec5f2cb..d6eefbbf163f 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch-flower.c @@ -111,11 +111,11 @@ static int dpaa2_switch_flower_parse_key(struct flow_cls_offload *cls, return 0; } -int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *filter_block, struct dpaa2_switch_acl_entry *entry) { struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; - struct ethsw_core *ethsw = acl_tbl->ethsw; + struct ethsw_core *ethsw = filter_block->ethsw; struct dpsw_acl_key *acl_key = &entry->key; struct device *dev = ethsw->dev; u8 *cmd_buff; @@ -136,7 +136,7 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, } err = dpsw_acl_add_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, acl_entry_cfg); + filter_block->acl_id, acl_entry_cfg); dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), DMA_TO_DEVICE); @@ -150,12 +150,13 @@ int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, return 0; } -static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, - struct dpaa2_switch_acl_entry *entry) +static int +dpaa2_switch_acl_entry_remove(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_acl_entry *entry) { struct dpsw_acl_entry_cfg *acl_entry_cfg = &entry->cfg; struct dpsw_acl_key *acl_key = &entry->key; - struct ethsw_core *ethsw = acl_tbl->ethsw; + struct ethsw_core *ethsw = block->ethsw; struct device *dev = ethsw->dev; u8 *cmd_buff; int err; @@ -175,7 +176,7 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, } err = dpsw_acl_remove_entry(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, acl_entry_cfg); + block->acl_id, acl_entry_cfg); dma_unmap_single(dev, acl_entry_cfg->key_iova, sizeof(cmd_buff), DMA_TO_DEVICE); @@ -190,19 +191,19 @@ static int dpaa2_switch_acl_entry_remove(struct dpaa2_switch_acl_tbl *acl_tbl, } static int -dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry) { struct dpaa2_switch_acl_entry *tmp; struct list_head *pos, *n; int index = 0; - if (list_empty(&acl_tbl->entries)) { - list_add(&entry->list, &acl_tbl->entries); + if (list_empty(&block->acl_entries)) { + list_add(&entry->list, &block->acl_entries); return index; } - list_for_each_safe(pos, n, &acl_tbl->entries) { + list_for_each_safe(pos, n, &block->acl_entries) { tmp = list_entry(pos, struct dpaa2_switch_acl_entry, list); if (entry->prio < tmp->prio) break; @@ -213,13 +214,13 @@ dpaa2_switch_acl_entry_add_to_list(struct dpaa2_switch_acl_tbl *acl_tbl, } static struct dpaa2_switch_acl_entry* -dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_filter_block *block, int index) { struct dpaa2_switch_acl_entry *tmp; int i = 0; - list_for_each_entry(tmp, &acl_tbl->entries, list) { + list_for_each_entry(tmp, &block->acl_entries, list) { if (i == index) return tmp; ++i; @@ -229,37 +230,38 @@ 
dpaa2_switch_acl_entry_get_by_index(struct dpaa2_switch_acl_tbl *acl_tbl, } static int -dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_set_precedence(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry, int precedence) { int err; - err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + err = dpaa2_switch_acl_entry_remove(block, entry); if (err) return err; entry->cfg.precedence = precedence; - return dpaa2_switch_acl_entry_add(acl_tbl, entry); + return dpaa2_switch_acl_entry_add(block, entry); } -static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, - struct dpaa2_switch_acl_entry *entry) +static int +dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_acl_entry *entry) { struct dpaa2_switch_acl_entry *tmp; int index, i, precedence, err; /* Add the new ACL entry to the linked list and get its index */ - index = dpaa2_switch_acl_entry_add_to_list(acl_tbl, entry); + index = dpaa2_switch_acl_entry_add_to_list(block, entry); /* Move up in priority the ACL entries to make space * for the new filter. */ - precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - acl_tbl->num_rules - 1; + precedence = DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES - block->num_acl_rules - 1; for (i = 0; i < index; i++) { - tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); + tmp = dpaa2_switch_acl_entry_get_by_index(block, i); - err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + err = dpaa2_switch_acl_entry_set_precedence(block, tmp, precedence); if (err) return err; @@ -269,19 +271,19 @@ static int dpaa2_switch_acl_tbl_add_entry(struct dpaa2_switch_acl_tbl *acl_tbl, /* Add the new entry to hardware */ entry->cfg.precedence = precedence; - err = dpaa2_switch_acl_entry_add(acl_tbl, entry); - acl_tbl->num_rules++; + err = dpaa2_switch_acl_entry_add(block, entry); + block->num_acl_rules++; return err; } static struct dpaa2_switch_acl_entry * -dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_filter_block *block, unsigned long cookie) { struct dpaa2_switch_acl_entry *tmp, *n; - list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + list_for_each_entry_safe(tmp, n, &block->acl_entries, list) { if (tmp->cookie == cookie) return tmp; } @@ -289,13 +291,13 @@ dpaa2_switch_acl_tbl_find_entry_by_cookie(struct dpaa2_switch_acl_tbl *acl_tbl, } static int -dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry) { struct dpaa2_switch_acl_entry *tmp, *n; int index = 0; - list_for_each_entry_safe(tmp, n, &acl_tbl->entries, list) { + list_for_each_entry_safe(tmp, n, &block->acl_entries, list) { if (tmp->cookie == entry->cookie) return index; index++; @@ -303,21 +305,34 @@ dpaa2_switch_acl_entry_get_index(struct dpaa2_switch_acl_tbl *acl_tbl, return -ENOENT; } +static struct dpaa2_switch_mirror_entry * +dpaa2_switch_mirror_find_entry_by_cookie(struct dpaa2_switch_filter_block *block, + unsigned long cookie) +{ + struct dpaa2_switch_mirror_entry *tmp, *n; + + list_for_each_entry_safe(tmp, n, &block->mirror_entries, list) { + if (tmp->cookie == cookie) + return tmp; + } + return NULL; +} + static int -dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry 
*entry) { struct dpaa2_switch_acl_entry *tmp; int index, i, precedence, err; - index = dpaa2_switch_acl_entry_get_index(acl_tbl, entry); + index = dpaa2_switch_acl_entry_get_index(block, entry); /* Remove from hardware the ACL entry */ - err = dpaa2_switch_acl_entry_remove(acl_tbl, entry); + err = dpaa2_switch_acl_entry_remove(block, entry); if (err) return err; - acl_tbl->num_rules--; + block->num_acl_rules--; /* Remove it from the list also */ list_del(&entry->list); @@ -325,8 +340,8 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, /* Move down in priority the entries over the deleted one */ precedence = entry->cfg.precedence; for (i = index - 1; i >= 0; i--) { - tmp = dpaa2_switch_acl_entry_get_by_index(acl_tbl, i); - err = dpaa2_switch_acl_entry_set_precedence(acl_tbl, tmp, + tmp = dpaa2_switch_acl_entry_get_by_index(block, i); + err = dpaa2_switch_acl_entry_set_precedence(block, tmp, precedence); if (err) return err; @@ -339,10 +354,10 @@ dpaa2_switch_acl_tbl_remove_entry(struct dpaa2_switch_acl_tbl *acl_tbl, return 0; } -static int dpaa2_switch_tc_parse_action(struct ethsw_core *ethsw, - struct flow_action_entry *cls_act, - struct dpsw_acl_result *dpsw_act, - struct netlink_ext_ack *extack) +static int dpaa2_switch_tc_parse_action_acl(struct ethsw_core *ethsw, + struct flow_action_entry *cls_act, + struct dpsw_acl_result *dpsw_act, + struct netlink_ext_ack *extack) { int err = 0; @@ -374,22 +389,110 @@ out: return err; } -int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, +static int +dpaa2_switch_block_add_mirror(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_mirror_entry *entry, + u16 to, struct netlink_ext_ack *extack) +{ + unsigned long block_ports = block->ports; + struct ethsw_core *ethsw = block->ethsw; + struct ethsw_port_priv *port_priv; + unsigned long ports_added = 0; + u16 vlan = entry->cfg.vlan_id; + bool mirror_port_enabled; + int err, port; + + /* Setup the mirroring port */ + mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); + if (!mirror_port_enabled) { + err = dpsw_set_reflection_if(ethsw->mc_io, 0, + ethsw->dpsw_handle, to); + if (err) + return err; + ethsw->mirror_port = to; + } + + /* Setup the same egress mirroring configuration on all the switch + * ports that share the same filter block. + */ + for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) { + port_priv = ethsw->ports[port]; + + /* We cannot add a per VLAN mirroring rule if the VLAN in + * question is not installed on the switch port. 
+ */ + if (entry->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN && + !(port_priv->vlans[vlan] & ETHSW_VLAN_MEMBER)) { + NL_SET_ERR_MSG(extack, + "VLAN must be installed on the switch port"); + err = -EINVAL; + goto err_remove_filters; + } + + err = dpsw_if_add_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port, &entry->cfg); + if (err) + goto err_remove_filters; + + ports_added |= BIT(port); + } + + list_add(&entry->list, &block->mirror_entries); + + return 0; + +err_remove_filters: + for_each_set_bit(port, &ports_added, ethsw->sw_attr.num_ifs) { + dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle, + port, &entry->cfg); + } + + if (!mirror_port_enabled) + ethsw->mirror_port = ethsw->sw_attr.num_ifs; + + return err; +} + +static int +dpaa2_switch_block_remove_mirror(struct dpaa2_switch_filter_block *block, + struct dpaa2_switch_mirror_entry *entry) +{ + struct dpsw_reflection_cfg *cfg = &entry->cfg; + unsigned long block_ports = block->ports; + struct ethsw_core *ethsw = block->ethsw; + int port; + + /* Remove this mirroring configuration from all the ports belonging to + * the filter block. + */ + for_each_set_bit(port, &block_ports, ethsw->sw_attr.num_ifs) + dpsw_if_remove_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle, + port, cfg); + + /* Also remove it from the list of mirror filters */ + list_del(&entry->list); + kfree(entry); + + /* If this was the last mirror filter, then unset the mirror port */ + if (list_empty(&block->mirror_entries)) + ethsw->mirror_port = ethsw->sw_attr.num_ifs; + + return 0; +} + +static int +dpaa2_switch_cls_flower_replace_acl(struct dpaa2_switch_filter_block *block, struct flow_cls_offload *cls) { struct flow_rule *rule = flow_cls_offload_flow_rule(cls); struct netlink_ext_ack *extack = cls->common.extack; - struct ethsw_core *ethsw = acl_tbl->ethsw; struct dpaa2_switch_acl_entry *acl_entry; + struct ethsw_core *ethsw = block->ethsw; struct flow_action_entry *act; int err; - if (!flow_offload_has_one_action(&rule->action)) { - NL_SET_ERR_MSG(extack, "Only singular actions are supported"); - return -EOPNOTSUPP; - } - - if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + if (dpaa2_switch_acl_tbl_is_full(block)) { NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); return -ENOMEM; } @@ -403,15 +506,15 @@ int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, goto free_acl_entry; act = &rule->action.entries[0]; - err = dpaa2_switch_tc_parse_action(ethsw, act, - &acl_entry->cfg.result, extack); + err = dpaa2_switch_tc_parse_action_acl(ethsw, act, + &acl_entry->cfg.result, extack); if (err) goto free_acl_entry; acl_entry->prio = cls->common.prio; acl_entry->cookie = cls->cookie; - err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry); if (err) goto free_acl_entry; @@ -423,33 +526,171 @@ free_acl_entry: return err; } -int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, - struct flow_cls_offload *cls) +static int dpaa2_switch_flower_parse_mirror_key(struct flow_cls_offload *cls, + u16 *vlan) { - struct dpaa2_switch_acl_entry *entry; + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct flow_dissector *dissector = rule->match.dissector; + struct netlink_ext_ack *extack = cls->common.extack; + + if (dissector->used_keys & + ~(BIT(FLOW_DISSECTOR_KEY_BASIC) | + BIT(FLOW_DISSECTOR_KEY_CONTROL) | + BIT(FLOW_DISSECTOR_KEY_VLAN))) { + NL_SET_ERR_MSG_MOD(extack, + "Mirroring is supported only per VLAN"); + return -EOPNOTSUPP; + } + + if 
(flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { + struct flow_match_vlan match; + + flow_rule_match_vlan(rule, &match); - entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); - if (!entry) - return 0; + if (match.mask->vlan_priority != 0 || + match.mask->vlan_dei != 0) { + NL_SET_ERR_MSG_MOD(extack, + "Only matching on VLAN ID supported"); + return -EOPNOTSUPP; + } - return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); + if (match.mask->vlan_id != 0xFFF) { + NL_SET_ERR_MSG_MOD(extack, + "Masked matching not supported"); + return -EOPNOTSUPP; + } + + *vlan = (u16)match.key->vlan_id; + } + + return 0; } -int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, - struct tc_cls_matchall_offload *cls) +static int +dpaa2_switch_cls_flower_replace_mirror(struct dpaa2_switch_filter_block *block, + struct flow_cls_offload *cls) { struct netlink_ext_ack *extack = cls->common.extack; - struct ethsw_core *ethsw = acl_tbl->ethsw; - struct dpaa2_switch_acl_entry *acl_entry; - struct flow_action_entry *act; + struct dpaa2_switch_mirror_entry *mirror_entry; + struct ethsw_core *ethsw = block->ethsw; + struct dpaa2_switch_mirror_entry *tmp; + struct flow_action_entry *cls_act; + struct list_head *pos, *n; + bool mirror_port_enabled; + u16 if_id, vlan; int err; - if (!flow_offload_has_one_action(&cls->rule->action)) { + mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); + cls_act = &cls->rule->action.entries[0]; + + /* Offload rules only when the destination is a DPAA2 switch port */ + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + + /* We have a single mirror port but can configure egress mirroring on + * all the other switch ports. We need to allow mirroring rules only + * when the destination port is the same. + */ + if (mirror_port_enabled && ethsw->mirror_port != if_id) { + NL_SET_ERR_MSG_MOD(extack, + "Multiple mirror ports not supported"); + return -EBUSY; + } + + /* Parse the key */ + err = dpaa2_switch_flower_parse_mirror_key(cls, &vlan); + if (err) + return err; + + /* Make sure that we don't already have a mirror rule with the same + * configuration. 
+ */ + list_for_each_safe(pos, n, &block->mirror_entries) { + tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list); + + if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_VLAN && + tmp->cfg.vlan_id == vlan) { + NL_SET_ERR_MSG_MOD(extack, + "VLAN mirror filter already installed"); + return -EBUSY; + } + } + + mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL); + if (!mirror_entry) + return -ENOMEM; + + mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_VLAN; + mirror_entry->cfg.vlan_id = vlan; + mirror_entry->cookie = cls->cookie; + + return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id, + extack); +} + +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block, + struct flow_cls_offload *cls) +{ + struct flow_rule *rule = flow_cls_offload_flow_rule(cls); + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action_entry *act; + + if (!flow_offload_has_one_action(&rule->action)) { NL_SET_ERR_MSG(extack, "Only singular actions are supported"); return -EOPNOTSUPP; } - if (dpaa2_switch_acl_tbl_is_full(acl_tbl)) { + act = &rule->action.entries[0]; + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_TRAP: + case FLOW_ACTION_DROP: + return dpaa2_switch_cls_flower_replace_acl(block, cls); + case FLOW_ACTION_MIRRED: + return dpaa2_switch_cls_flower_replace_mirror(block, cls); + default: + NL_SET_ERR_MSG_MOD(extack, "Action not supported"); + return -EOPNOTSUPP; + } +} + +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block, + struct flow_cls_offload *cls) +{ + struct dpaa2_switch_mirror_entry *mirror_entry; + struct dpaa2_switch_acl_entry *acl_entry; + + /* If this filter is a an ACL one, remove it */ + acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block, + cls->cookie); + if (acl_entry) + return dpaa2_switch_acl_tbl_remove_entry(block, acl_entry); + + /* If not, then it has to be a mirror */ + mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block, + cls->cookie); + if (mirror_entry) + return dpaa2_switch_block_remove_mirror(block, + mirror_entry); + + return 0; +} + +static int +dpaa2_switch_cls_matchall_replace_acl(struct dpaa2_switch_filter_block *block, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct ethsw_core *ethsw = block->ethsw; + struct dpaa2_switch_acl_entry *acl_entry; + struct flow_action_entry *act; + int err; + + if (dpaa2_switch_acl_tbl_is_full(block)) { NL_SET_ERR_MSG(extack, "Maximum filter capacity reached"); return -ENOMEM; } @@ -459,15 +700,15 @@ int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, return -ENOMEM; act = &cls->rule->action.entries[0]; - err = dpaa2_switch_tc_parse_action(ethsw, act, - &acl_entry->cfg.result, extack); + err = dpaa2_switch_tc_parse_action_acl(ethsw, act, + &acl_entry->cfg.result, extack); if (err) goto free_acl_entry; acl_entry->prio = cls->common.prio; acl_entry->cookie = cls->cookie; - err = dpaa2_switch_acl_tbl_add_entry(acl_tbl, acl_entry); + err = dpaa2_switch_acl_tbl_add_entry(block, acl_entry); if (err) goto free_acl_entry; @@ -479,14 +720,159 @@ free_acl_entry: return err; } -int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, +static int +dpaa2_switch_cls_matchall_replace_mirror(struct dpaa2_switch_filter_block *block, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct dpaa2_switch_mirror_entry *mirror_entry; + struct ethsw_core *ethsw = block->ethsw; + 
struct dpaa2_switch_mirror_entry *tmp; + struct flow_action_entry *cls_act; + struct list_head *pos, *n; + bool mirror_port_enabled; + u16 if_id; + + mirror_port_enabled = (ethsw->mirror_port != ethsw->sw_attr.num_ifs); + cls_act = &cls->rule->action.entries[0]; + + /* Offload rules only when the destination is a DPAA2 switch port */ + if (!dpaa2_switch_port_dev_check(cls_act->dev)) { + NL_SET_ERR_MSG_MOD(extack, + "Destination not a DPAA2 switch port"); + return -EOPNOTSUPP; + } + if_id = dpaa2_switch_get_index(ethsw, cls_act->dev); + + /* We have a single mirror port but can configure egress mirroring on + * all the other switch ports. We need to allow mirroring rules only + * when the destination port is the same. + */ + if (mirror_port_enabled && ethsw->mirror_port != if_id) { + NL_SET_ERR_MSG_MOD(extack, + "Multiple mirror ports not supported"); + return -EBUSY; + } + + /* Make sure that we don't already have a mirror rule with the same + * configuration. One matchall rule per block is the maximum. + */ + list_for_each_safe(pos, n, &block->mirror_entries) { + tmp = list_entry(pos, struct dpaa2_switch_mirror_entry, list); + + if (tmp->cfg.filter == DPSW_REFLECTION_FILTER_INGRESS_ALL) { + NL_SET_ERR_MSG_MOD(extack, + "Matchall mirror filter already installed"); + return -EBUSY; + } + } + + mirror_entry = kzalloc(sizeof(*mirror_entry), GFP_KERNEL); + if (!mirror_entry) + return -ENOMEM; + + mirror_entry->cfg.filter = DPSW_REFLECTION_FILTER_INGRESS_ALL; + mirror_entry->cookie = cls->cookie; + + return dpaa2_switch_block_add_mirror(block, mirror_entry, if_id, + extack); +} + +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block, + struct tc_cls_matchall_offload *cls) +{ + struct netlink_ext_ack *extack = cls->common.extack; + struct flow_action_entry *act; + + if (!flow_offload_has_one_action(&cls->rule->action)) { + NL_SET_ERR_MSG(extack, "Only singular actions are supported"); + return -EOPNOTSUPP; + } + + act = &cls->rule->action.entries[0]; + switch (act->id) { + case FLOW_ACTION_REDIRECT: + case FLOW_ACTION_TRAP: + case FLOW_ACTION_DROP: + return dpaa2_switch_cls_matchall_replace_acl(block, cls); + case FLOW_ACTION_MIRRED: + return dpaa2_switch_cls_matchall_replace_mirror(block, cls); + default: + NL_SET_ERR_MSG_MOD(extack, "Action not supported"); + return -EOPNOTSUPP; + } +} + +int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_mirror_entry *tmp; + int err; + + list_for_each_entry(tmp, &block->mirror_entries, list) { + err = dpsw_if_add_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx, &tmp->cfg); + if (err) + goto unwind_add; + } + + return 0; + +unwind_add: + list_for_each_entry(tmp, &block->mirror_entries, list) + dpsw_if_remove_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx, &tmp->cfg); + + return err; +} + +int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv) +{ + struct ethsw_core *ethsw = port_priv->ethsw_data; + struct dpaa2_switch_mirror_entry *tmp; + int err; + + list_for_each_entry(tmp, &block->mirror_entries, list) { + err = dpsw_if_remove_reflection(ethsw->mc_io, 0, + ethsw->dpsw_handle, + port_priv->idx, &tmp->cfg); + if (err) + goto unwind_remove; + } + + return 0; + +unwind_remove: + list_for_each_entry(tmp, &block->mirror_entries, list) + dpsw_if_add_reflection(ethsw->mc_io, 0, ethsw->dpsw_handle, + 
port_priv->idx, &tmp->cfg); + + return err; +} + +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *cls) { - struct dpaa2_switch_acl_entry *entry; + struct dpaa2_switch_mirror_entry *mirror_entry; + struct dpaa2_switch_acl_entry *acl_entry; + + /* If this filter is an ACL one, remove it */ + acl_entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(block, + cls->cookie); + if (acl_entry) + return dpaa2_switch_acl_tbl_remove_entry(block, + acl_entry); - entry = dpaa2_switch_acl_tbl_find_entry_by_cookie(acl_tbl, cls->cookie); - if (!entry) - return 0; + /* If not, then it has to be a mirror */ + mirror_entry = dpaa2_switch_mirror_find_entry_by_cookie(block, + cls->cookie); + if (mirror_entry) + return dpaa2_switch_block_remove_mirror(block, + mirror_entry); - return dpaa2_switch_acl_tbl_remove_entry(acl_tbl, entry); + return 0; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 98cc0133c343..175f15c46842 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -41,14 +41,14 @@ static struct dpaa2_switch_fdb *dpaa2_switch_fdb_get_unused(struct ethsw_core *e return NULL; } -static struct dpaa2_switch_acl_tbl * -dpaa2_switch_acl_tbl_get_unused(struct ethsw_core *ethsw) +static struct dpaa2_switch_filter_block * +dpaa2_switch_filter_block_get_unused(struct ethsw_core *ethsw) { int i; for (i = 0; i < ethsw->sw_attr.num_ifs; i++) - if (!ethsw->acls[i].in_use) - return &ethsw->acls[i]; + if (!ethsw->filter_blocks[i].in_use) + return &ethsw->filter_blocks[i]; return NULL; } @@ -594,12 +594,18 @@ static int dpaa2_switch_port_change_mtu(struct net_device *netdev, int mtu) return 0; } -static int dpaa2_switch_port_carrier_state_sync(struct net_device *netdev) +static int dpaa2_switch_port_link_state_update(struct net_device *netdev) { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct dpsw_link_state state; int err; + /* When we manage the MAC/PHY using phylink there is no need + * to manually update the netif_carrier. + */ + if (dpaa2_switch_port_is_type_phy(port_priv)) + return 0; + /* Interrupts are received even though no one issued an 'ifconfig up' * on the switch interface. Ignore these link state update interrupts */ @@ -677,12 +683,14 @@ static int dpaa2_switch_port_open(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; - /* Explicitly set carrier off, otherwise - * netif_carrier_ok() will return true and cause 'ip link show' - * to report the LOWER_UP flag, even though the link - * notification wasn't even received. - */ - netif_carrier_off(netdev); + if (!dpaa2_switch_port_is_type_phy(port_priv)) { + /* Explicitly set carrier off, otherwise + * netif_carrier_ok() will return true and cause 'ip link show' + * to report the LOWER_UP flag, even though the link + * notification wasn't even received.
+ */ + netif_carrier_off(netdev); + } err = dpsw_if_enable(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, @@ -692,23 +700,12 @@ static int dpaa2_switch_port_open(struct net_device *netdev) return err; } - /* sync carrier state */ - err = dpaa2_switch_port_carrier_state_sync(netdev); - if (err) { - netdev_err(netdev, - "dpaa2_switch_port_carrier_state_sync err %d\n", err); - goto err_carrier_sync; - } - dpaa2_switch_enable_ctrl_if_napi(ethsw); - return 0; + if (dpaa2_switch_port_is_type_phy(port_priv)) + phylink_start(port_priv->mac->phylink); -err_carrier_sync: - dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, - port_priv->ethsw_data->dpsw_handle, - port_priv->idx); - return err; + return 0; } static int dpaa2_switch_port_stop(struct net_device *netdev) @@ -717,6 +714,13 @@ static int dpaa2_switch_port_stop(struct net_device *netdev) struct ethsw_core *ethsw = port_priv->ethsw_data; int err; + if (dpaa2_switch_port_is_type_phy(port_priv)) { + phylink_stop(port_priv->mac->phylink); + } else { + netif_tx_stop_all_queues(netdev); + netif_carrier_off(netdev); + } + err = dpsw_if_disable(port_priv->ethsw_data->mc_io, 0, port_priv->ethsw_data->dpsw_handle, port_priv->idx); @@ -1127,28 +1131,28 @@ err_exit: } static int -dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_setup_tc_cls_flower(struct dpaa2_switch_filter_block *filter_block, struct flow_cls_offload *f) { switch (f->command) { case FLOW_CLS_REPLACE: - return dpaa2_switch_cls_flower_replace(acl_tbl, f); + return dpaa2_switch_cls_flower_replace(filter_block, f); case FLOW_CLS_DESTROY: - return dpaa2_switch_cls_flower_destroy(acl_tbl, f); + return dpaa2_switch_cls_flower_destroy(filter_block, f); default: return -EOPNOTSUPP; } } static int -dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_acl_tbl *acl_tbl, +dpaa2_switch_setup_tc_cls_matchall(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *f) { switch (f->command) { case TC_CLSMATCHALL_REPLACE: - return dpaa2_switch_cls_matchall_replace(acl_tbl, f); + return dpaa2_switch_cls_matchall_replace(block, f); case TC_CLSMATCHALL_DESTROY: - return dpaa2_switch_cls_matchall_destroy(acl_tbl, f); + return dpaa2_switch_cls_matchall_destroy(block, f); default: return -EOPNOTSUPP; } @@ -1170,106 +1174,122 @@ static int dpaa2_switch_port_setup_tc_block_cb_ig(enum tc_setup_type type, static LIST_HEAD(dpaa2_switch_block_cb_list); -static int dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) +static int +dpaa2_switch_port_acl_tbl_bind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_filter_block *block) { struct ethsw_core *ethsw = port_priv->ethsw_data; struct net_device *netdev = port_priv->netdev; struct dpsw_acl_if_cfg acl_if_cfg; int err; - if (port_priv->acl_tbl) + if (port_priv->filter_block) return -EINVAL; acl_if_cfg.if_id[0] = port_priv->idx; acl_if_cfg.num_ifs = 1; err = dpsw_acl_add_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, &acl_if_cfg); + block->acl_id, &acl_if_cfg); if (err) { netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); return err; } - acl_tbl->ports |= BIT(port_priv->idx); - port_priv->acl_tbl = acl_tbl; + block->ports |= BIT(port_priv->idx); + port_priv->filter_block = block; return 0; } static int dpaa2_switch_port_acl_tbl_unbind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) + struct dpaa2_switch_filter_block *block) { struct ethsw_core *ethsw = port_priv->ethsw_data; struct net_device 
*netdev = port_priv->netdev; struct dpsw_acl_if_cfg acl_if_cfg; int err; - if (port_priv->acl_tbl != acl_tbl) + if (port_priv->filter_block != block) return -EINVAL; acl_if_cfg.if_id[0] = port_priv->idx; acl_if_cfg.num_ifs = 1; err = dpsw_acl_remove_if(ethsw->mc_io, 0, ethsw->dpsw_handle, - acl_tbl->id, &acl_if_cfg); + block->acl_id, &acl_if_cfg); if (err) { netdev_err(netdev, "dpsw_acl_add_if err %d\n", err); return err; } - acl_tbl->ports &= ~BIT(port_priv->idx); - port_priv->acl_tbl = NULL; + block->ports &= ~BIT(port_priv->idx); + port_priv->filter_block = NULL; return 0; } static int dpaa2_switch_port_block_bind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) + struct dpaa2_switch_filter_block *block) { - struct dpaa2_switch_acl_tbl *old_acl_tbl = port_priv->acl_tbl; + struct dpaa2_switch_filter_block *old_block = port_priv->filter_block; int err; + /* Offload all the mirror entries found in the block on this new port + * joining it. + */ + err = dpaa2_switch_block_offload_mirror(block, port_priv); + if (err) + return err; + /* If the port is already bound to this ACL table then do nothing. This * can happen when this port is the first one to join a tc block */ - if (port_priv->acl_tbl == acl_tbl) + if (port_priv->filter_block == block) return 0; - err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_acl_tbl); + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, old_block); if (err) return err; /* Mark the previous ACL table as being unused if this was the last * port that was using it. */ - if (old_acl_tbl->ports == 0) - old_acl_tbl->in_use = false; + if (old_block->ports == 0) + old_block->in_use = false; - return dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + return dpaa2_switch_port_acl_tbl_bind(port_priv, block); } -static int dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, - struct dpaa2_switch_acl_tbl *acl_tbl) +static int +dpaa2_switch_port_block_unbind(struct ethsw_port_priv *port_priv, + struct dpaa2_switch_filter_block *block) { struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *new_acl_tbl; + struct dpaa2_switch_filter_block *new_block; int err; + /* Unoffload all the mirror entries found in the block from the + * port leaving it. + */ + err = dpaa2_switch_block_unoffload_mirror(block, port_priv); + if (err) + return err; + /* We are the last port that leaves a block (an ACL table). * We'll continue to use this table. 
*/ - if (acl_tbl->ports == BIT(port_priv->idx)) + if (block->ports == BIT(port_priv->idx)) return 0; - err = dpaa2_switch_port_acl_tbl_unbind(port_priv, acl_tbl); + err = dpaa2_switch_port_acl_tbl_unbind(port_priv, block); if (err) return err; - if (acl_tbl->ports == 0) - acl_tbl->in_use = false; + if (block->ports == 0) + block->in_use = false; - new_acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); - new_acl_tbl->in_use = true; - return dpaa2_switch_port_acl_tbl_bind(port_priv, new_acl_tbl); + new_block = dpaa2_switch_filter_block_get_unused(ethsw); + new_block->in_use = true; + return dpaa2_switch_port_acl_tbl_bind(port_priv, new_block); } static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, @@ -1277,7 +1297,7 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; struct flow_block_cb *block_cb; bool register_block = false; int err; @@ -1287,24 +1307,24 @@ static int dpaa2_switch_setup_tc_block_bind(struct net_device *netdev, ethsw); if (!block_cb) { - /* If the ACL table is not already known, then this port must - * be the first to join it. In this case, we can just continue - * to use our private table + /* If the filter block is not already known, then this port + * must be the first to join it. In this case, we can just + * continue to use our private table */ - acl_tbl = port_priv->acl_tbl; + filter_block = port_priv->filter_block; block_cb = flow_block_cb_alloc(dpaa2_switch_port_setup_tc_block_cb_ig, - ethsw, acl_tbl, NULL); + ethsw, filter_block, NULL); if (IS_ERR(block_cb)) return PTR_ERR(block_cb); register_block = true; } else { - acl_tbl = flow_block_cb_priv(block_cb); + filter_block = flow_block_cb_priv(block_cb); } flow_block_cb_incref(block_cb); - err = dpaa2_switch_port_block_bind(port_priv, acl_tbl); + err = dpaa2_switch_port_block_bind(port_priv, filter_block); if (err) goto err_block_bind; @@ -1327,7 +1347,7 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; struct flow_block_cb *block_cb; int err; @@ -1337,8 +1357,8 @@ static void dpaa2_switch_setup_tc_block_unbind(struct net_device *netdev, if (!block_cb) return; - acl_tbl = flow_block_cb_priv(block_cb); - err = dpaa2_switch_port_block_unbind(port_priv, acl_tbl); + filter_block = flow_block_cb_priv(block_cb); + err = dpaa2_switch_port_block_unbind(port_priv, filter_block); if (!err && !flow_block_cb_decref(block_cb)) { flow_block_cb_remove(block_cb, f); list_del(&block_cb->driver_list); @@ -1403,41 +1423,105 @@ bool dpaa2_switch_port_dev_check(const struct net_device *netdev) return netdev->netdev_ops == &dpaa2_switch_port_ops; } -static void dpaa2_switch_links_state_update(struct ethsw_core *ethsw) +static int dpaa2_switch_port_connect_mac(struct ethsw_port_priv *port_priv) { - int i; + struct fsl_mc_device *dpsw_port_dev, *dpmac_dev; + struct dpaa2_mac *mac; + int err; - for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { - dpaa2_switch_port_carrier_state_sync(ethsw->ports[i]->netdev); - dpaa2_switch_port_set_mac_addr(ethsw->ports[i]); + dpsw_port_dev = to_fsl_mc_device(port_priv->netdev->dev.parent); + dpmac_dev = fsl_mc_get_endpoint(dpsw_port_dev, port_priv->idx); + 
+ if (PTR_ERR(dpmac_dev) == -EPROBE_DEFER) + return PTR_ERR(dpmac_dev); + + if (IS_ERR(dpmac_dev) || dpmac_dev->dev.type != &fsl_mc_bus_dpmac_type) + return 0; + + mac = kzalloc(sizeof(*mac), GFP_KERNEL); + if (!mac) + return -ENOMEM; + + mac->mc_dev = dpmac_dev; + mac->mc_io = port_priv->ethsw_data->mc_io; + mac->net_dev = port_priv->netdev; + + err = dpaa2_mac_open(mac); + if (err) + goto err_free_mac; + port_priv->mac = mac; + + if (dpaa2_switch_port_is_type_phy(port_priv)) { + err = dpaa2_mac_connect(mac); + if (err) { + netdev_err(port_priv->netdev, + "Error connecting to the MAC endpoint %pe\n", + ERR_PTR(err)); + goto err_close_mac; + } } + + return 0; + +err_close_mac: + dpaa2_mac_close(mac); + port_priv->mac = NULL; +err_free_mac: + kfree(mac); + return err; +} + +static void dpaa2_switch_port_disconnect_mac(struct ethsw_port_priv *port_priv) +{ + if (dpaa2_switch_port_is_type_phy(port_priv)) + dpaa2_mac_disconnect(port_priv->mac); + + if (!dpaa2_switch_port_has_mac(port_priv)) + return; + + dpaa2_mac_close(port_priv->mac); + kfree(port_priv->mac); + port_priv->mac = NULL; } static irqreturn_t dpaa2_switch_irq0_handler_thread(int irq_num, void *arg) { struct device *dev = (struct device *)arg; struct ethsw_core *ethsw = dev_get_drvdata(dev); - - /* Mask the events and the if_id reserved bits to be cleared on read */ - u32 status = DPSW_IRQ_EVENT_LINK_CHANGED | 0xFFFF0000; - int err; + struct ethsw_port_priv *port_priv; + u32 status = ~0; + int err, if_id; err = dpsw_get_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, DPSW_IRQ_INDEX_IF, &status); if (err) { dev_err(dev, "Can't get irq status (err %d)\n", err); - - err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, - DPSW_IRQ_INDEX_IF, 0xFFFFFFFF); - if (err) - dev_err(dev, "Can't clear irq status (err %d)\n", err); goto out; } - if (status & DPSW_IRQ_EVENT_LINK_CHANGED) - dpaa2_switch_links_state_update(ethsw); + if_id = (status & 0xFFFF0000) >> 16; + port_priv = ethsw->ports[if_id]; + + if (status & DPSW_IRQ_EVENT_LINK_CHANGED) { + dpaa2_switch_port_link_state_update(port_priv->netdev); + dpaa2_switch_port_set_mac_addr(port_priv); + } + + if (status & DPSW_IRQ_EVENT_ENDPOINT_CHANGED) { + rtnl_lock(); + if (dpaa2_switch_port_has_mac(port_priv)) + dpaa2_switch_port_disconnect_mac(port_priv); + else + dpaa2_switch_port_connect_mac(port_priv); + rtnl_unlock(); + } out: + err = dpsw_clear_irq_status(ethsw->mc_io, 0, ethsw->dpsw_handle, + DPSW_IRQ_INDEX_IF, status); + if (err) + dev_err(dev, "Can't clear irq status (err %d)\n", err); + return IRQ_HANDLED; } @@ -1889,8 +1973,12 @@ static int dpaa2_switch_port_attr_set_event(struct net_device *netdev, return notifier_from_errno(err); } +static struct notifier_block dpaa2_switch_port_switchdev_nb; +static struct notifier_block dpaa2_switch_port_switchdev_blocking_nb; + static int dpaa2_switch_port_bridge_join(struct net_device *netdev, - struct net_device *upper_dev) + struct net_device *upper_dev, + struct netlink_ext_ack *extack) { struct ethsw_port_priv *port_priv = netdev_priv(netdev); struct ethsw_core *ethsw = port_priv->ethsw_data; @@ -1906,8 +1994,8 @@ static int dpaa2_switch_port_bridge_join(struct net_device *netdev, other_port_priv = netdev_priv(other_dev); if (other_port_priv->ethsw_data != port_priv->ethsw_data) { - netdev_err(netdev, - "Interface from a different DPSW is in the bridge already!\n"); + NL_SET_ERR_MSG_MOD(extack, + "Interface from a different DPSW is in the bridge already"); return -EINVAL; } } @@ -1929,8 +2017,16 @@ static int 
dpaa2_switch_port_bridge_join(struct net_device *netdev, if (err) goto err_egress_flood; + err = switchdev_bridge_port_offload(netdev, netdev, NULL, + &dpaa2_switch_port_switchdev_nb, + &dpaa2_switch_port_switchdev_blocking_nb, + false, extack); + if (err) + goto err_switchdev_offload; + return 0; +err_switchdev_offload: err_egress_flood: dpaa2_switch_port_set_fdb(port_priv, NULL); return err; @@ -1956,6 +2052,13 @@ static int dpaa2_switch_port_restore_rxvlan(struct net_device *vdev, int vid, vo return dpaa2_switch_port_vlan_add(arg, vlan_proto, vid); } +static void dpaa2_switch_port_pre_bridge_leave(struct net_device *netdev) +{ + switchdev_bridge_port_unoffload(netdev, NULL, + &dpaa2_switch_port_switchdev_nb, + &dpaa2_switch_port_switchdev_blocking_nb); +} + static int dpaa2_switch_port_bridge_leave(struct net_device *netdev) { struct ethsw_port_priv *port_priv = netdev_priv(netdev); @@ -2029,6 +2132,28 @@ static int dpaa2_switch_prevent_bridging_with_8021q_upper(struct net_device *net return 0; } +static int +dpaa2_switch_prechangeupper_sanity_checks(struct net_device *netdev, + struct net_device *upper_dev, + struct netlink_ext_ack *extack) +{ + int err; + + if (!br_vlan_enabled(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); + return -EOPNOTSUPP; + } + + err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); + if (err) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot join a bridge while VLAN uppers are present"); + return 0; + } + + return 0; +} + static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -2049,25 +2174,23 @@ static int dpaa2_switch_port_netdevice_event(struct notifier_block *nb, if (!netif_is_bridge_master(upper_dev)) break; - if (!br_vlan_enabled(upper_dev)) { - NL_SET_ERR_MSG_MOD(extack, "Cannot join a VLAN-unaware bridge"); - err = -EOPNOTSUPP; + err = dpaa2_switch_prechangeupper_sanity_checks(netdev, + upper_dev, + extack); + if (err) goto out; - } - err = dpaa2_switch_prevent_bridging_with_8021q_upper(netdev); - if (err) { - NL_SET_ERR_MSG_MOD(extack, - "Cannot join a bridge while VLAN uppers are present"); - goto out; - } + if (!info->linking) + dpaa2_switch_port_pre_bridge_leave(netdev); break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; if (netif_is_bridge_master(upper_dev)) { if (info->linking) - err = dpaa2_switch_port_bridge_join(netdev, upper_dev); + err = dpaa2_switch_port_bridge_join(netdev, + upper_dev, + extack); else err = dpaa2_switch_port_bridge_leave(netdev); } @@ -2802,6 +2925,18 @@ err_free_dpbp: return err; } +static void dpaa2_switch_remove_port(struct ethsw_core *ethsw, + u16 port_idx) +{ + struct ethsw_port_priv *port_priv = ethsw->ports[port_idx]; + + rtnl_lock(); + dpaa2_switch_port_disconnect_mac(port_priv); + rtnl_unlock(); + free_netdev(port_priv->netdev); + ethsw->ports[port_idx] = NULL; +} + static int dpaa2_switch_init(struct fsl_mc_device *sw_dev) { struct device *dev = &sw_dev->dev; @@ -2952,7 +3087,7 @@ static int dpaa2_switch_port_trap_mac_addr(struct ethsw_port_priv *port_priv, acl_entry.cfg.precedence = 0; acl_entry.cfg.result.action = DPSW_ACL_ACTION_REDIRECT_TO_CTRL_IF; - return dpaa2_switch_acl_entry_add(port_priv->acl_tbl, &acl_entry); + return dpaa2_switch_acl_entry_add(port_priv->filter_block, &acl_entry); } static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) @@ -2965,7 +3100,7 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) }; struct net_device *netdev = 
port_priv->netdev; struct ethsw_core *ethsw = port_priv->ethsw_data; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; struct dpsw_fdb_cfg fdb_cfg = {0}; struct dpsw_if_attr dpsw_if_attr; struct dpaa2_switch_fdb *fdb; @@ -3020,14 +3155,15 @@ static int dpaa2_switch_port_init(struct ethsw_port_priv *port_priv, u16 port) return err; } - acl_tbl = dpaa2_switch_acl_tbl_get_unused(ethsw); - acl_tbl->ethsw = ethsw; - acl_tbl->id = acl_tbl_id; - acl_tbl->in_use = true; - acl_tbl->num_rules = 0; - INIT_LIST_HEAD(&acl_tbl->entries); + filter_block = dpaa2_switch_filter_block_get_unused(ethsw); + filter_block->ethsw = ethsw; + filter_block->acl_id = acl_tbl_id; + filter_block->in_use = true; + filter_block->num_acl_rules = 0; + INIT_LIST_HEAD(&filter_block->acl_entries); + INIT_LIST_HEAD(&filter_block->mirror_entries); - err = dpaa2_switch_port_acl_tbl_bind(port_priv, acl_tbl); + err = dpaa2_switch_port_acl_tbl_bind(port_priv, filter_block); if (err) return err; @@ -3079,11 +3215,11 @@ static int dpaa2_switch_remove(struct fsl_mc_device *sw_dev) for (i = 0; i < ethsw->sw_attr.num_ifs; i++) { port_priv = ethsw->ports[i]; unregister_netdev(port_priv->netdev); - free_netdev(port_priv->netdev); + dpaa2_switch_remove_port(ethsw, i); } kfree(ethsw->fdbs); - kfree(ethsw->acls); + kfree(ethsw->filter_blocks); kfree(ethsw->ports); dpaa2_switch_teardown(sw_dev); @@ -3156,6 +3292,10 @@ static int dpaa2_switch_probe_port(struct ethsw_core *ethsw, goto err_port_probe; port_priv->learn_ena = false; + err = dpaa2_switch_port_connect_mac(port_priv); + if (err) + goto err_port_probe; + return 0; err_port_probe: @@ -3209,9 +3349,10 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) goto err_free_ports; } - ethsw->acls = kcalloc(ethsw->sw_attr.num_ifs, sizeof(*ethsw->acls), - GFP_KERNEL); - if (!ethsw->acls) { + ethsw->filter_blocks = kcalloc(ethsw->sw_attr.num_ifs, + sizeof(*ethsw->filter_blocks), + GFP_KERNEL); + if (!ethsw->filter_blocks) { err = -ENOMEM; goto err_free_fdbs; } @@ -3231,17 +3372,16 @@ static int dpaa2_switch_probe(struct fsl_mc_device *sw_dev) &ethsw->fq[i].napi, dpaa2_switch_poll, NAPI_POLL_WEIGHT); - err = dpsw_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); - if (err) { - dev_err(ethsw->dev, "dpsw_enable err %d\n", err); - goto err_free_netdev; - } - /* Setup IRQs */ err = dpaa2_switch_setup_irqs(sw_dev); if (err) goto err_stop; + /* By convention, if the mirror port is equal to the number of switch + * interfaces, then mirroring of any kind is disabled.
+ */ + ethsw->mirror_port = ethsw->sw_attr.num_ifs; + /* Register the netdev only when the entire setup is done and the * switch port interfaces are ready to receive traffic */ @@ -3263,8 +3403,8 @@ err_stop: dpsw_disable(ethsw->mc_io, 0, ethsw->dpsw_handle); err_free_netdev: for (i--; i >= 0; i--) - free_netdev(ethsw->ports[i]->netdev); - kfree(ethsw->acls); + dpaa2_switch_remove_port(ethsw, i); + kfree(ethsw->filter_blocks); err_free_fdbs: kfree(ethsw->fdbs); err_free_ports: diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h index bdef71f234cb..0002dca4d417 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.h @@ -21,6 +21,7 @@ #include <net/pkt_cls.h> #include <soc/fsl/dpaa2-io.h> +#include "dpaa2-mac.h" #include "dpsw.h" /* Number of IRQs supported */ @@ -113,20 +114,29 @@ struct dpaa2_switch_acl_entry { struct dpsw_acl_key key; }; -struct dpaa2_switch_acl_tbl { - struct list_head entries; +struct dpaa2_switch_mirror_entry { + struct list_head list; + struct dpsw_reflection_cfg cfg; + unsigned long cookie; + u16 if_id; +}; + +struct dpaa2_switch_filter_block { struct ethsw_core *ethsw; u64 ports; - - u16 id; - u8 num_rules; bool in_use; + + struct list_head acl_entries; + u16 acl_id; + u8 num_acl_rules; + + struct list_head mirror_entries; }; static inline bool -dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_acl_tbl *acl_tbl) +dpaa2_switch_acl_tbl_is_full(struct dpaa2_switch_filter_block *filter_block) { - if ((acl_tbl->num_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= + if ((filter_block->num_acl_rules + DPAA2_ETHSW_PORT_DEFAULT_TRAPS) >= DPAA2_ETHSW_PORT_MAX_ACL_ENTRIES) return true; return false; @@ -149,7 +159,8 @@ struct ethsw_port_priv { bool ucast_flood; bool learn_ena; - struct dpaa2_switch_acl_tbl *acl_tbl; + struct dpaa2_switch_filter_block *filter_block; + struct dpaa2_mac *mac; }; /* Switch data */ @@ -175,7 +186,8 @@ struct ethsw_core { int napi_users; struct dpaa2_switch_fdb *fdbs; - struct dpaa2_switch_acl_tbl *acls; + struct dpaa2_switch_filter_block *filter_blocks; + u16 mirror_port; }; static inline int dpaa2_switch_get_index(struct ethsw_core *ethsw, @@ -215,6 +227,22 @@ static inline bool dpaa2_switch_supports_cpu_traffic(struct ethsw_core *ethsw) return true; } +static inline bool +dpaa2_switch_port_is_type_phy(struct ethsw_port_priv *port_priv) +{ + if (port_priv->mac && + (port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_PHY || + port_priv->mac->attr.link_type == DPMAC_LINK_TYPE_BACKPLANE)) + return true; + + return false; +} + +static inline bool dpaa2_switch_port_has_mac(struct ethsw_port_priv *port_priv) +{ + return port_priv->mac ? 
true : false; +} + bool dpaa2_switch_port_dev_check(const struct net_device *netdev); int dpaa2_switch_port_vlans_add(struct net_device *netdev, @@ -229,18 +257,24 @@ typedef int dpaa2_switch_fdb_cb_t(struct ethsw_port_priv *port_priv, /* TC offload */ -int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_flower_replace(struct dpaa2_switch_filter_block *block, struct flow_cls_offload *cls); -int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_flower_destroy(struct dpaa2_switch_filter_block *block, struct flow_cls_offload *cls); -int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_matchall_replace(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *cls); -int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_cls_matchall_destroy(struct dpaa2_switch_filter_block *block, struct tc_cls_matchall_offload *cls); -int dpaa2_switch_acl_entry_add(struct dpaa2_switch_acl_tbl *acl_tbl, +int dpaa2_switch_acl_entry_add(struct dpaa2_switch_filter_block *block, struct dpaa2_switch_acl_entry *entry); + +int dpaa2_switch_block_offload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv); + +int dpaa2_switch_block_unoffload_mirror(struct dpaa2_switch_filter_block *block, + struct ethsw_port_priv *port_priv); #endif /* __ETHSW_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h index cb13e740f72b..397d55f2bd99 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw-cmd.h @@ -39,11 +39,16 @@ #define DPSW_CMDID_GET_IRQ_STATUS DPSW_CMD_ID(0x016) #define DPSW_CMDID_CLEAR_IRQ_STATUS DPSW_CMD_ID(0x017) +#define DPSW_CMDID_SET_REFLECTION_IF DPSW_CMD_ID(0x022) + #define DPSW_CMDID_IF_SET_TCI DPSW_CMD_ID(0x030) #define DPSW_CMDID_IF_SET_STP DPSW_CMD_ID(0x031) #define DPSW_CMDID_IF_GET_COUNTER DPSW_CMD_V2(0x034) +#define DPSW_CMDID_IF_ADD_REFLECTION DPSW_CMD_ID(0x037) +#define DPSW_CMDID_IF_REMOVE_REFLECTION DPSW_CMD_ID(0x038) + #define DPSW_CMDID_IF_ENABLE DPSW_CMD_ID(0x03D) #define DPSW_CMDID_IF_DISABLE DPSW_CMD_ID(0x03E) @@ -533,5 +538,19 @@ struct dpsw_cmd_acl_entry { __le64 pad2[4]; __le64 key_iova; }; + +struct dpsw_cmd_set_reflection_if { + __le16 if_id; +}; + +#define DPSW_FILTER_SHIFT 0 +#define DPSW_FILTER_SIZE 2 + +struct dpsw_cmd_if_reflection { + __le16 if_id; + __le16 vlan_id; + /* only 2 bits from the LSB */ + u8 filter; +}; #pragma pack(pop) #endif /* __FSL_DPSW_CMD_H */ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.c b/drivers/net/ethernet/freescale/dpaa2/dpsw.c index 6352d6d1ecba..ab921d75deb2 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.c @@ -1579,3 +1579,83 @@ int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, /* send command to mc*/ return mc_send_command(mc_io, &cmd); } + +/** + * dpsw_set_reflection_if() - Set target interface for traffic mirrored + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Id + * + * Only one mirroring destination is allowed per switch + * + * Return: Completion status. '0' on Success; Error code otherwise. 
+ */ +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id) +{ + struct dpsw_cmd_set_reflection_if *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_SET_REFLECTION_IF, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_set_reflection_if *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_add_reflection() - Setup mirroring rule + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Reflection configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg) +{ + struct dpsw_cmd_if_reflection *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_ADD_REFLECTION, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); + + return mc_send_command(mc_io, &cmd); +} + +/** + * dpsw_if_remove_reflection() - Remove mirroring rule + * @mc_io: Pointer to MC portal's I/O object + * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' + * @token: Token of DPSW object + * @if_id: Interface Identifier + * @cfg: Reflection configuration + * + * Return: Completion status. '0' on Success; Error code otherwise. + */ +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg) +{ + struct dpsw_cmd_if_reflection *cmd_params; + struct fsl_mc_command cmd = { 0 }; + + cmd.header = mc_encode_cmd_header(DPSW_CMDID_IF_REMOVE_REFLECTION, + cmd_flags, + token); + cmd_params = (struct dpsw_cmd_if_reflection *)cmd.params; + cmd_params->if_id = cpu_to_le16(if_id); + cmd_params->vlan_id = cpu_to_le16(cfg->vlan_id); + dpsw_set_field(cmd_params->filter, FILTER, cfg->filter); + + return mc_send_command(mc_io, &cmd); +} diff --git a/drivers/net/ethernet/freescale/dpaa2/dpsw.h b/drivers/net/ethernet/freescale/dpaa2/dpsw.h index 5ef221a25b02..b90bd363f47a 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpsw.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpsw.h @@ -99,6 +99,11 @@ int dpsw_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); #define DPSW_IRQ_EVENT_LINK_CHANGED 0x0001 /** + * DPSW_IRQ_EVENT_ENDPOINT_CHANGED - Indicates a change in endpoint + */ +#define DPSW_IRQ_EVENT_ENDPOINT_CHANGED 0x0002 + +/** * struct dpsw_irq_cfg - IRQ configuration * @addr: Address that must be written to signal a message-based interrupt * @val: Value to write into irq_addr address @@ -752,4 +757,35 @@ int dpsw_acl_add_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int dpsw_acl_remove_entry(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u16 acl_id, const struct dpsw_acl_entry_cfg *cfg); + +/** + * enum dpsw_reflection_filter - Filter type for frames to be reflected + * @DPSW_REFLECTION_FILTER_INGRESS_ALL: Reflect all frames + * @DPSW_REFLECTION_FILTER_INGRESS_VLAN: Reflect only frames that belong to + * the particular VLAN defined by vid parameter + * + */ +enum dpsw_reflection_filter { + DPSW_REFLECTION_FILTER_INGRESS_ALL = 0, + DPSW_REFLECTION_FILTER_INGRESS_VLAN = 1 +}; + +/** + * struct 
dpsw_reflection_cfg - Structure representing the mirroring config + * @filter: Filter type for frames to be mirrored + * @vlan_id: VLAN ID to mirror; valid only when the type is DPSW_INGRESS_VLAN + */ +struct dpsw_reflection_cfg { + enum dpsw_reflection_filter filter; + u16 vlan_id; +}; + +int dpsw_set_reflection_if(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id); + +int dpsw_if_add_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg); + +int dpsw_if_remove_reflection(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, + u16 if_id, const struct dpsw_reflection_cfg *cfg); #endif /* __FSL_DPSW_H */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c index ebccaf02411c..9690e36e9e85 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_ethtool.c @@ -585,7 +585,9 @@ static void enetc_get_ringparam(struct net_device *ndev, } static int enetc_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ic) + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); struct enetc_int_vector *v = priv->int_vector[0]; @@ -602,7 +604,9 @@ static int enetc_get_coalesce(struct net_device *ndev, } static int enetc_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ic) + struct ethtool_coalesce *ic, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct enetc_ndev_priv *priv = netdev_priv(ndev); u32 rx_ictt, tx_ictt; diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index c84f6c226743..60d94e0a07d6 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -735,7 +735,7 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_set_vf_vlan = enetc_pf_set_vf_vlan, .ndo_set_vf_spoofchk = enetc_pf_set_vf_spoofchk, .ndo_set_features = enetc_pf_set_features, - .ndo_do_ioctl = enetc_ioctl, + .ndo_eth_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_setup_tc, .ndo_bpf = enetc_setup_bpf, .ndo_xdp_xmit = enetc_xdp_xmit, diff --git a/drivers/net/ethernet/freescale/enetc/enetc_vf.c b/drivers/net/ethernet/freescale/enetc/enetc_vf.c index 03090ba7e226..1a9d1e8b772c 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_vf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_vf.c @@ -99,7 +99,7 @@ static const struct net_device_ops enetc_ndev_ops = { .ndo_get_stats = enetc_get_stats, .ndo_set_mac_address = enetc_vf_set_mac_addr, .ndo_set_features = enetc_vf_set_features, - .ndo_do_ioctl = enetc_ioctl, + .ndo_eth_ioctl = enetc_ioctl, .ndo_setup_tc = enetc_setup_tc, }; diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 2e002e4b4b4a..7b4961daa254 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -77,6 +77,8 @@ #define FEC_R_DES_ACTIVE_2 0x1e8 /* Rx descriptor active for ring 2 */ #define FEC_X_DES_ACTIVE_2 0x1ec /* Tx descriptor active for ring 2 */ #define FEC_QOS_SCHEME 0x1f0 /* Set multi queues Qos scheme */ +#define FEC_LPI_SLEEP 0x1f4 /* Set IEEE802.3az LPI Sleep Ts time */ +#define FEC_LPI_WAKE 0x1f8 /* Set IEEE802.3az LPI Wake Tw time */ #define FEC_MIIGSK_CFGR 0x300 /* MIIGSK Configuration reg */ #define FEC_MIIGSK_ENR 0x308 /* MIIGSK Enable reg */ @@ 
-187,6 +189,8 @@ #define FEC_RXIC0 0xfff #define FEC_RXIC1 0xfff #define FEC_RXIC2 0xfff +#define FEC_LPI_SLEEP 0xfff +#define FEC_LPI_WAKE 0xfff #endif /* CONFIG_M5272 */ @@ -379,6 +383,9 @@ struct bufdesc_ex { #define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF) #define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF)) +#define FEC_ENET_TXC_DLY ((uint)0x00010000) +#define FEC_ENET_RXC_DLY ((uint)0x00020000) + /* ENET interrupt coalescing macro define */ #define FEC_ITR_CLK_SEL (0x1 << 30) #define FEC_ITR_EN (0x1 << 31) @@ -472,6 +479,22 @@ struct bufdesc_ex { */ #define FEC_QUIRK_HAS_MULTI_QUEUES (1 << 19) +/* i.MX8MQ ENET IP version add new feature to support IEEE 802.3az EEE + * standard. For the transmission, MAC supply two user registers to set + * Sleep (TS) and Wake (TW) time. + */ +#define FEC_QUIRK_HAS_EEE (1 << 20) + +/* i.MX8QM ENET IP version add new feture to generate delayed TXC/RXC + * as an alternative option to make sure it works well with various PHYs. + * For the implementation of delayed clock, ENET takes synchronized 250MHz + * clocks to generate 2ns delay. + */ +#define FEC_QUIRK_DELAYED_CLKS_SUPPORT (1 << 21) + +/* i.MX8MQ SoC integration mix wakeup interrupt signal into "int2" interrupt line. */ +#define FEC_QUIRK_WAKEUP_FROM_INT2 (1 << 22) + struct bufdesc_prop { int qid; /* Address of Rx and Tx buffers */ @@ -528,6 +551,7 @@ struct fec_enet_private { struct clk *clk_ref; struct clk *clk_enet_out; struct clk *clk_ptp; + struct clk *clk_2x_txclk; bool ptp_clk_on; struct mutex ptp_clk_mutex; @@ -550,6 +574,8 @@ struct fec_enet_private { uint phy_speed; phy_interface_t phy_interface; struct device_node *phy_node; + bool rgmii_txc_dly; + bool rgmii_rxc_dly; int link; int full_duplex; int speed; @@ -557,6 +583,7 @@ struct fec_enet_private { bool bufdesc_ex; int pause_flag; int wol_flag; + int wake_irq; u32 quirks; struct napi_struct napi; @@ -589,6 +616,10 @@ struct fec_enet_private { unsigned int tx_time_itr; unsigned int itr_clk_rate; + /* tx lpi eee mode */ + struct ethtool_eee eee; + unsigned int clk_ref_rate; + u32 rx_copybreak; /* ptp clock period in ns*/ diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 7e4c4980ced7..80bd5c629fa0 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -135,6 +135,26 @@ static const struct fec_devinfo fec_imx6ul_info = { FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII, }; +static const struct fec_devinfo fec_imx8mq_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_HAS_EEE | FEC_QUIRK_WAKEUP_FROM_INT2, +}; + +static const struct fec_devinfo fec_imx8qm_info = { + .quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | + FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB | + FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE | + FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE | + FEC_QUIRK_CLEAR_SETUP_MII | FEC_QUIRK_HAS_MULTI_QUEUES | + FEC_QUIRK_DELAYED_CLKS_SUPPORT, +}; + static struct platform_device_id fec_devtype[] = { { /* keep it for coldfire */ @@ -162,6 +182,12 @@ static struct platform_device_id fec_devtype[] = { .name = "imx6ul-fec", .driver_data = (kernel_ulong_t)&fec_imx6ul_info, }, { + .name = "imx8mq-fec", + 
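/* Illustrative aside, not part of the patch: the two devinfo entries
 * introduced above share a common quirk set and differ only in their
 * new bits -- fec_imx8mq_info adds FEC_QUIRK_HAS_EEE and
 * FEC_QUIRK_WAKEUP_FROM_INT2, while fec_imx8qm_info adds
 * FEC_QUIRK_DELAYED_CLKS_SUPPORT -- and are bound to the new
 * "fsl,imx8mq-fec" / "fsl,imx8qm-fec" compatibles in fec_dt_ids[] below.
 */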
.driver_data = (kernel_ulong_t)&fec_imx8mq_info, + }, { + .name = "imx8qm-fec", + .driver_data = (kernel_ulong_t)&fec_imx8qm_info, + }, { /* sentinel */ } }; @@ -175,6 +201,8 @@ enum imx_fec_type { MVF600_FEC, IMX6SX_FEC, IMX6UL_FEC, + IMX8MQ_FEC, + IMX8QM_FEC, }; static const struct of_device_id fec_dt_ids[] = { @@ -185,6 +213,8 @@ static const struct of_device_id fec_dt_ids[] = { { .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], }, { .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], }, { .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], }, + { .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], }, + { .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, fec_dt_ids); @@ -1107,6 +1137,13 @@ fec_restart(struct net_device *ndev) if (fep->bufdesc_ex) ecntl |= (1 << 4); + if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && + fep->rgmii_txc_dly) + ecntl |= FEC_ENET_TXC_DLY; + if (fep->quirks & FEC_QUIRK_DELAYED_CLKS_SUPPORT && + fep->rgmii_rxc_dly) + ecntl |= FEC_ENET_RXC_DLY; + #ifndef CONFIG_M5272 /* Enable the MIB statistic event counters */ writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT); @@ -1970,6 +2007,10 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) if (ret) goto failed_clk_ref; + ret = clk_prepare_enable(fep->clk_2x_txclk); + if (ret) + goto failed_clk_2x_txclk; + fec_enet_phy_reset_after_clk_enable(ndev); } else { clk_disable_unprepare(fep->clk_enet_out); @@ -1980,10 +2021,14 @@ static int fec_enet_clk_enable(struct net_device *ndev, bool enable) mutex_unlock(&fep->ptp_clk_mutex); } clk_disable_unprepare(fep->clk_ref); + clk_disable_unprepare(fep->clk_2x_txclk); } return 0; +failed_clk_2x_txclk: + if (fep->clk_ref) + clk_disable_unprepare(fep->clk_ref); failed_clk_ref: if (fep->clk_ptp) { mutex_lock(&fep->ptp_clk_mutex); @@ -1997,6 +2042,34 @@ failed_clk_ptp: return ret; } +static int fec_enet_parse_rgmii_delay(struct fec_enet_private *fep, + struct device_node *np) +{ + u32 rgmii_tx_delay, rgmii_rx_delay; + + /* For rgmii tx internal delay, valid values are 0ps and 2000ps */ + if (!of_property_read_u32(np, "tx-internal-delay-ps", &rgmii_tx_delay)) { + if (rgmii_tx_delay != 0 && rgmii_tx_delay != 2000) { + dev_err(&fep->pdev->dev, "The only allowed RGMII TX delay values are: 0ps, 2000ps"); + return -EINVAL; + } else if (rgmii_tx_delay == 2000) { + fep->rgmii_txc_dly = true; + } + } + + /* For rgmii rx internal delay, valid values are 0ps and 2000ps */ + if (!of_property_read_u32(np, "rx-internal-delay-ps", &rgmii_rx_delay)) { + if (rgmii_rx_delay != 0 && rgmii_rx_delay != 2000) { + dev_err(&fep->pdev->dev, "The only allowed RGMII RX delay values are: 0ps, 2000ps"); + return -EINVAL; + } else if (rgmii_rx_delay == 2000) { + fep->rgmii_rxc_dly = true; + } + } + + return 0; +} + static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -2581,8 +2654,10 @@ static void fec_enet_itr_coal_set(struct net_device *ndev) } } -static int -fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) +static int fec_enet_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); @@ -2598,8 +2673,10 @@ fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec) return 0; } -static int -fec_enet_set_coalesce(struct net_device *ndev, 
struct ethtool_coalesce *ec) +static int fec_enet_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fec_enet_private *fep = netdev_priv(ndev); struct device *dev = &fep->pdev->dev; @@ -2651,7 +2728,7 @@ static void fec_enet_itr_coal_init(struct net_device *ndev) ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT; ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT; - fec_enet_set_coalesce(ndev, &ec); + fec_enet_set_coalesce(ndev, &ec, NULL, NULL); } static int fec_enet_get_tunable(struct net_device *netdev, @@ -2692,6 +2769,92 @@ static int fec_enet_set_tunable(struct net_device *netdev, return ret; } +/* LPI Sleep Ts count base on tx clk (clk_ref). + * The lpi sleep cnt value = X us / (cycle_ns). + */ +static int fec_enet_us_to_tx_cycle(struct net_device *ndev, int us) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + return us * (fep->clk_ref_rate / 1000) / 1000; +} + +static int fec_enet_eee_mode_set(struct net_device *ndev, bool enable) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct ethtool_eee *p = &fep->eee; + unsigned int sleep_cycle, wake_cycle; + int ret = 0; + + if (enable) { + ret = phy_init_eee(ndev->phydev, 0); + if (ret) + return ret; + + sleep_cycle = fec_enet_us_to_tx_cycle(ndev, p->tx_lpi_timer); + wake_cycle = sleep_cycle; + } else { + sleep_cycle = 0; + wake_cycle = 0; + } + + p->tx_lpi_enabled = enable; + p->eee_enabled = enable; + p->eee_active = enable; + + writel(sleep_cycle, fep->hwp + FEC_LPI_SLEEP); + writel(wake_cycle, fep->hwp + FEC_LPI_WAKE); + + return 0; +} + +static int +fec_enet_get_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct ethtool_eee *p = &fep->eee; + + if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) + return -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -ENETDOWN; + + edata->eee_enabled = p->eee_enabled; + edata->eee_active = p->eee_active; + edata->tx_lpi_timer = p->tx_lpi_timer; + edata->tx_lpi_enabled = p->tx_lpi_enabled; + + return phy_ethtool_get_eee(ndev->phydev, edata); +} + +static int +fec_enet_set_eee(struct net_device *ndev, struct ethtool_eee *edata) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct ethtool_eee *p = &fep->eee; + int ret = 0; + + if (!(fep->quirks & FEC_QUIRK_HAS_EEE)) + return -EOPNOTSUPP; + + if (!netif_running(ndev)) + return -ENETDOWN; + + p->tx_lpi_timer = edata->tx_lpi_timer; + + if (!edata->eee_enabled || !edata->tx_lpi_enabled || + !edata->tx_lpi_timer) + ret = fec_enet_eee_mode_set(ndev, false); + else + ret = fec_enet_eee_mode_set(ndev, true); + + if (ret) + return ret; + + return phy_ethtool_set_eee(ndev->phydev, edata); +} + static void fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) { @@ -2719,12 +2882,12 @@ fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol) device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC); if (device_may_wakeup(&ndev->dev)) { fep->wol_flag |= FEC_WOL_FLAG_ENABLE; - if (fep->irq[0] > 0) - enable_irq_wake(fep->irq[0]); + if (fep->wake_irq > 0) + enable_irq_wake(fep->wake_irq); } else { fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE); - if (fep->irq[0] > 0) - disable_irq_wake(fep->irq[0]); + if (fep->wake_irq > 0) + disable_irq_wake(fep->wake_irq); } return 0; @@ -2752,6 +2915,8 @@ static const struct ethtool_ops fec_enet_ethtool_ops = { .set_tunable = fec_enet_set_tunable, .get_wol = fec_enet_get_wol, .set_wol = fec_enet_set_wol, + 
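	/* Illustrative aside (not part of this patch): the get_eee/set_eee
	 * hooks added below lean on fec_enet_us_to_tx_cycle() above, which
	 * converts the ethtool tx_lpi_timer (microseconds) into clk_ref
	 * cycles as us * (clk_ref_rate / 1000) / 1000.  Assuming a
	 * clk_ref_rate of 125 MHz (an example value, not taken from the
	 * patch) and tx_lpi_timer of 1000 us, fec_enet_eee_mode_set()
	 * writes 1000 * 125000 / 1000 = 125000 into both FEC_LPI_SLEEP and
	 * FEC_LPI_WAKE, since the wake count is set equal to the sleep
	 * count.
	 */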
.get_eee = fec_enet_get_eee, + .set_eee = fec_enet_set_eee, .get_link_ksettings = phy_ethtool_get_link_ksettings, .set_link_ksettings = phy_ethtool_set_link_ksettings, .self_test = net_selftest, @@ -3280,7 +3445,7 @@ static const struct net_device_ops fec_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = fec_timeout, .ndo_set_mac_address = fec_set_mac_address, - .ndo_do_ioctl = fec_enet_ioctl, + .ndo_eth_ioctl = fec_enet_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fec_poll_controller, #endif @@ -3535,6 +3700,17 @@ static int fec_enet_get_irq_cnt(struct platform_device *pdev) return irq_cnt; } +static void fec_enet_get_wakeup_irq(struct platform_device *pdev) +{ + struct net_device *ndev = platform_get_drvdata(pdev); + struct fec_enet_private *fep = netdev_priv(ndev); + + if (fep->quirks & FEC_QUIRK_WAKEUP_FROM_INT2) + fep->wake_irq = fep->irq[2]; + else + fep->wake_irq = fep->irq[0]; +} + static int fec_enet_init_stop_mode(struct fec_enet_private *fep, struct device_node *np) { @@ -3666,6 +3842,10 @@ fec_probe(struct platform_device *pdev) fep->phy_interface = interface; } + ret = fec_enet_parse_rgmii_delay(fep, np); + if (ret) + goto failed_rgmii_delay; + fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(fep->clk_ipg)) { ret = PTR_ERR(fep->clk_ipg); @@ -3692,6 +3872,14 @@ fec_probe(struct platform_device *pdev) fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref"); if (IS_ERR(fep->clk_ref)) fep->clk_ref = NULL; + fep->clk_ref_rate = clk_get_rate(fep->clk_ref); + + /* clk_2x_txclk is optional, depends on board */ + if (fep->rgmii_txc_dly || fep->rgmii_rxc_dly) { + fep->clk_2x_txclk = devm_clk_get(&pdev->dev, "enet_2x_txclk"); + if (IS_ERR(fep->clk_2x_txclk)) + fep->clk_2x_txclk = NULL; + } fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX; fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); @@ -3762,6 +3950,9 @@ fec_probe(struct platform_device *pdev) fep->irq[i] = irq; } + /* Decide which interrupt line is wakeup capable */ + fec_enet_get_wakeup_irq(pdev); + ret = fec_enet_mii_init(pdev); if (ret) goto failed_mii_init; @@ -3809,6 +4000,7 @@ failed_clk_ahb: failed_clk_ipg: fec_enet_clk_enable(ndev, false); failed_clk: +failed_rgmii_delay: if (of_phy_is_fixed_link(np)) of_phy_deregister_fixed_link(np); of_node_put(phy_node); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index 02c47658a215..73ff359a15f1 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -792,7 +792,7 @@ static const struct net_device_ops mpc52xx_fec_netdev_ops = { .ndo_set_rx_mode = mpc52xx_fec_set_multicast_list, .ndo_set_mac_address = mpc52xx_fec_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = mpc52xx_fec_tx_timeout, .ndo_get_stats = mpc52xx_fec_get_stats, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c index 6ee325ad35c5..2db6e38a772e 100644 --- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c +++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c @@ -900,7 +900,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { .ndo_start_xmit = fs_enet_start_xmit, .ndo_tx_timeout = fs_timeout, .ndo_set_rx_mode = fs_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = 
eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 9646483137c4..af6ad94bf24a 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -3184,7 +3184,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_set_features = gfar_set_features, .ndo_set_rx_mode = gfar_set_multi, .ndo_tx_timeout = gfar_timeout, - .ndo_do_ioctl = gfar_ioctl, + .ndo_eth_ioctl = gfar_ioctl, .ndo_get_stats64 = gfar_get_stats64, .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = gfar_set_mac_addr, diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index cc7d4f93da54..7b32ed29bf4c 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -243,7 +243,9 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, /* Get the coalescing parameters, and put them in the cvals * structure. */ static int gfar_gcoalesce(struct net_device *dev, - struct ethtool_coalesce *cvals) + struct ethtool_coalesce *cvals, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gfar_private *priv = netdev_priv(dev); struct gfar_priv_rx_q *rx_queue = NULL; @@ -280,7 +282,9 @@ static int gfar_gcoalesce(struct net_device *dev, * in order for coalescing to be active */ static int gfar_scoalesce(struct net_device *dev, - struct ethtool_coalesce *cvals) + struct ethtool_coalesce *cvals, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct gfar_private *priv = netdev_priv(dev); int i, err = 0; diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 0acfafb73db1..3eb288d10b0c 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -3516,7 +3516,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_set_mac_address = ucc_geth_set_mac_addr, .ndo_set_rx_mode = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, - .ndo_do_ioctl = ucc_geth_ioctl, + .ndo_eth_ioctl = ucc_geth_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ucc_netpoll, #endif diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 5bb56b454541..f089d33dd48e 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -322,7 +322,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, tail = ioread32be(&priv->reg_bar0->adminq_event_counter); // Check if next command will overflow the buffer. - if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) { + if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == + (tail & priv->adminq_mask)) { int err; // Flush existing commands to make room. @@ -332,7 +333,8 @@ static int gve_adminq_issue_cmd(struct gve_priv *priv, // Retry. tail = ioread32be(&priv->reg_bar0->adminq_event_counter); - if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == tail) { + if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) == + (tail & priv->adminq_mask)) { // This should never happen. We just flushed the // command queue so there should be enough space. 
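		// Editor's illustration, not from the patch itself:
		// adminq_prod_cnt and the event counter read into 'tail' are
		// both free-running, so the "queue full" test has to reduce
		// both sides modulo the ring size.  Assuming a 64-entry admin
		// queue (adminq_mask == 63), with adminq_prod_cnt == 127 and
		// tail == 64 the old test compared (128 & 63) == 0 against 64
		// and never saw the ring as full; masking tail as well
		// compares 0 against (64 & 63) == 0 and detects it.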
return -ENOMEM; diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index bb062b02fb85..3312e1d93c3b 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -90,6 +90,8 @@ config HNS_ENET config HNS3 tristate "Hisilicon Network Subsystem Support HNS3 (Framework)" depends on PCI + select NET_DEVLINK + select PAGE_POOL help This selects the framework support for Hisilicon Network Subsystem 3. This layer facilitates clients like ENET, RoCE and user-space ethernet @@ -102,7 +104,7 @@ config HNS3_HCLGE tristate "Hisilicon HNS3 HCLGE Acceleration Engine & Compatibility Layer Support" default m depends on PCI_MSI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This selects the HNS3_HCLGE network acceleration engine & its hardware compatibility layer. The engine would be used in Hisilicon hip08 family of diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index e53512f6878a..37b605fed32c 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -796,7 +796,9 @@ static void hip04_tx_timeout_task(struct work_struct *work) } static int hip04_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hip04_priv *priv = netdev_priv(netdev); @@ -807,7 +809,9 @@ static int hip04_get_coalesce(struct net_device *netdev, } static int hip04_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hip04_priv *priv = netdev_priv(netdev); diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c index 3c4db4a6b431..22bf914f2dbd 100644 --- a/drivers/net/ethernet/hisilicon/hisi_femac.c +++ b/drivers/net/ethernet/hisilicon/hisi_femac.c @@ -685,7 +685,7 @@ static const struct net_device_ops hisi_femac_netdev_ops = { .ndo_open = hisi_femac_net_open, .ndo_stop = hisi_femac_net_close, .ndo_start_xmit = hisi_femac_net_xmit, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = hisi_femac_set_mac_address, .ndo_set_rx_mode = hisi_femac_net_set_rx_mode, }; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index ad534f9e41ab..343c605c4be8 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1945,7 +1945,7 @@ static const struct net_device_ops hns_nic_netdev_ops = { .ndo_tx_timeout = hns_nic_net_timeout, .ndo_set_mac_address = hns_nic_net_set_mac_address, .ndo_change_mtu = hns_nic_change_mtu, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_features = hns_nic_set_features, .ndo_fix_features = hns_nic_fix_features, .ndo_get_stats64 = hns_nic_get_stats64, diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 7e62dcff2426..ab7390225942 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -730,11 +730,15 @@ static int hns_set_pauseparam(struct net_device *net_dev, * hns_get_coalesce - get coalesce info. * @net_dev: net device * @ec: coalesce info. 
+ * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Return 0 on success, negative on failure. */ static int hns_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; @@ -774,11 +778,15 @@ static int hns_get_coalesce(struct net_device *net_dev, * hns_set_coalesce - set coalesce info. * @net_dev: net device * @ec: coalesce info. + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Return 0 on success, negative on failure. */ static int hns_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hns_nic_priv *priv = netdev_priv(net_dev); struct hnae_ae_ops *ops; diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index aa86a81c8f4a..c2bd2584201f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -9,7 +9,7 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_RESET = 0x01, /* (VF -> PF) assert reset */ - HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset*/ + HCLGE_MBX_ASSERTING_RESET, /* (PF -> VF) PF is asserting reset */ HCLGE_MBX_SET_UNICAST, /* (VF -> PF) set UC addr */ HCLGE_MBX_SET_MULTICAST, /* (VF -> PF) set MC addr */ HCLGE_MBX_SET_VLAN, /* (VF -> PF) set VLAN */ diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index e0b7c3c44e7b..546a60530384 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -65,7 +65,7 @@ #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 -#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ +#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) | \ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) #define hnae3_dev_roce_supported(hdev) \ @@ -718,6 +718,8 @@ struct hnae3_ae_ops { u32 nsec, u32 sec); int (*get_ts_info)(struct hnae3_handle *handle, struct ethtool_ts_info *info); + int (*get_link_diagnosis_info)(struct hnae3_handle *handle, + u32 *status_code); }; struct hnae3_dcb_ops { @@ -772,6 +774,7 @@ struct hnae3_knic_private_info { u16 int_rl_setting; enum pkt_hash_types rss_type; + void __iomem *io_base; }; struct hnae3_roce_private_info { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c index 532523069d74..2b66c59f5eaf 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -38,9 +38,8 @@ static struct hns3_dbg_dentry_info hns3_dbg_dentry[] = { }, }; -static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, unsigned int cmd); -static int hns3_dbg_common_file_init(struct hnae3_handle *handle, - unsigned int cmd); +static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd); +static int hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd); static struct hns3_dbg_cmd_info hns3_dbg_cmd[] = { { @@ -696,7 +695,7 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h, char *buf, int len) sprintf(result[j++], "%u", i); sprintf(result[j++], "%u", h->ae_algo->ops->get_global_queue_id(h, i)); - 
sprintf(result[j++], "%u", + sprintf(result[j++], "%d", priv->ring[i].tqp_vector->vector_irq); hns3_dbg_fill_content(content, sizeof(content), queue_map_items, (const char **)result, @@ -798,10 +797,10 @@ static const struct hns3_dbg_item tx_bd_info_items[] = { { "T_CS_VLAN_TSO", 2 }, { "OT_VLAN_TAG", 3 }, { "TV", 2 }, - { "OLT_VLAN_LEN", 2}, - { "PAYLEN_OL4CS", 2}, - { "BD_FE_SC_VLD", 2}, - { "MSS_HW_CSUM", 0}, + { "OLT_VLAN_LEN", 2 }, + { "PAYLEN_OL4CS", 2 }, + { "BD_FE_SC_VLD", 2 }, + { "MSS_HW_CSUM", 0 }, }; static void hns3_dump_tx_bd_info(struct hns3_nic_priv *priv, @@ -868,7 +867,7 @@ static void hns3_dbg_dev_caps(struct hnae3_handle *h, char *buf, int len, int *pos) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); - static const char * const str[] = {"no", "yes"}; + const char * const str[] = {"no", "yes"}; unsigned long *caps = ae_dev->caps; u32 i, state; @@ -938,20 +937,19 @@ static int hns3_dbg_dev_info(struct hnae3_handle *h, char *buf, int len) return 0; } -static int hns3_dbg_get_cmd_index(struct hnae3_handle *handle, - const unsigned char *name, u32 *index) +static int hns3_dbg_get_cmd_index(struct hns3_dbg_data *dbg_data, u32 *index) { u32 i; for (i = 0; i < ARRAY_SIZE(hns3_dbg_cmd); i++) { - if (!strncmp(name, hns3_dbg_cmd[i].name, - strlen(hns3_dbg_cmd[i].name))) { + if (hns3_dbg_cmd[i].cmd == dbg_data->cmd) { *index = i; return 0; } } - dev_err(&handle->pdev->dev, "unknown command(%s)\n", name); + dev_err(&dbg_data->handle->pdev->dev, "unknown command(%d)\n", + dbg_data->cmd); return -EINVAL; } @@ -1019,8 +1017,7 @@ static ssize_t hns3_dbg_read(struct file *filp, char __user *buffer, u32 index; int ret; - ret = hns3_dbg_get_cmd_index(handle, filp->f_path.dentry->d_iname, - &index); + ret = hns3_dbg_get_cmd_index(dbg_data, &index); if (ret) return ret; @@ -1090,6 +1087,7 @@ static int hns3_dbg_bd_file_init(struct hnae3_handle *handle, u32 cmd) char name[HNS3_DBG_FILE_NAME_LEN]; data[i].handle = handle; + data[i].cmd = hns3_dbg_cmd[cmd].cmd; data[i].qid = i; sprintf(name, "%s%u", hns3_dbg_cmd[cmd].name, i); debugfs_create_file(name, 0400, entry_dir, &data[i], @@ -1110,6 +1108,7 @@ hns3_dbg_common_file_init(struct hnae3_handle *handle, u32 cmd) return -ENOMEM; data->handle = handle; + data->cmd = hns3_dbg_cmd[cmd].cmd; entry_dir = hns3_dbg_dentry[hns3_dbg_cmd[cmd].dentry].dentry; debugfs_create_file(hns3_dbg_cmd[cmd].name, 0400, entry_dir, data, &hns3_dbg_fops); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h index f3766ff38bb7..bd8801065e02 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.h @@ -22,6 +22,7 @@ struct hns3_dbg_item { struct hns3_dbg_data { struct hnae3_handle *handle; + enum hnae3_dbg_cmd cmd; u16 qid; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index cdb5f14fb6bc..22af3d6ce178 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -63,7 +63,7 @@ MODULE_PARM_DESC(tx_sgl, "Minimum number of frags when using dma_map_sg() to opt #define HNS3_SGL_SIZE(nfrag) (sizeof(struct scatterlist) * (nfrag) + \ sizeof(struct sg_table)) -#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM),\ +#define HNS3_MAX_SGL_SIZE ALIGN(HNS3_SGL_SIZE(HNS3_MAX_TSO_BD_NUM), \ dma_get_cache_alignment()) #define DEFAULT_MSG_LEVEL (NETIF_MSG_PROBE | NETIF_MSG_LINK | \ @@ -100,7 +100,7 @@ static const 
struct pci_device_id hns3_pci_tbl[] = { {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_RDMA_DCB_PFC_VF), HNAE3_DEV_SUPPORT_ROCE_DCB_BITS}, /* required last entry */ - {0, } + {0,} }; MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); @@ -971,8 +971,7 @@ static u32 hns3_tx_spare_space(struct hns3_enet_ring *ring) /* The free tx buffer is divided into two part, so pick the * larger one. */ - return (ntc > (tx_spare->len - ntu) ? ntc : - (tx_spare->len - ntu)) - 1; + return max(ntc, tx_spare->len - ntu) - 1; } static void hns3_tx_spare_update(struct hns3_enet_ring *ring) @@ -2852,7 +2851,7 @@ static const struct net_device_ops hns3_nic_netdev_ops = { .ndo_start_xmit = hns3_nic_net_xmit, .ndo_tx_timeout = hns3_nic_net_timeout, .ndo_set_mac_address = hns3_nic_net_set_mac_address, - .ndo_do_ioctl = hns3_nic_do_ioctl, + .ndo_eth_ioctl = hns3_nic_do_ioctl, .ndo_change_mtu = hns3_nic_change_mtu, .ndo_set_features = hns3_nic_set_features, .ndo_features_check = hns3_features_check, @@ -3127,11 +3126,6 @@ static void hns3_set_default_feature(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; - netdev->hw_enc_features |= NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_SCTP_CRC | NETIF_F_TSO_MANGLEID | NETIF_F_FRAGLIST; - netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM; netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | @@ -3141,62 +3135,37 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - netdev->vlan_features |= NETIF_F_RXCSUM | - NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO | - NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | - NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO | - NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE | - NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL | - NETIF_F_SCTP_CRC | NETIF_F_FRAGLIST; - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) { - netdev->hw_features |= NETIF_F_GRO_HW; netdev->features |= NETIF_F_GRO_HW; - if (!(h->flags & HNAE3_SUPPORT_VF)) { - netdev->hw_features |= NETIF_F_NTUPLE; + if (!(h->flags & HNAE3_SUPPORT_VF)) netdev->features |= NETIF_F_NTUPLE; - } } - if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_GSO_UDP_L4; + if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) netdev->features |= NETIF_F_GSO_UDP_L4; - netdev->vlan_features |= NETIF_F_GSO_UDP_L4; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_L4; - } - if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_HW_CSUM; + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) netdev->features |= NETIF_F_HW_CSUM; - netdev->vlan_features |= NETIF_F_HW_CSUM; - netdev->hw_enc_features |= NETIF_F_HW_CSUM; - } else { - netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + else netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - netdev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - } - if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; + if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->vlan_features |= 
NETIF_F_GSO_UDP_TUNNEL_CSUM; - netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; - } - if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) { - netdev->hw_features |= NETIF_F_HW_TC; + if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) netdev->features |= NETIF_F_HW_TC; - } - if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= netdev->features; + if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->vlan_features |= netdev->features & + ~(NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_CTAG_TX | + NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_GRO_HW | NETIF_F_NTUPLE | + NETIF_F_HW_TC); + + netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; } static int hns3_alloc_buffer(struct hns3_enet_ring *ring, @@ -3205,6 +3174,21 @@ static int hns3_alloc_buffer(struct hns3_enet_ring *ring, unsigned int order = hns3_page_order(ring); struct page *p; + if (ring->page_pool) { + p = page_pool_dev_alloc_frag(ring->page_pool, + &cb->page_offset, + hns3_buf_size(ring)); + if (unlikely(!p)) + return -ENOMEM; + + cb->priv = p; + cb->buf = page_address(p); + cb->dma = page_pool_get_dma_addr(p); + cb->type = DESC_TYPE_PP_FRAG; + cb->reuse_flag = 0; + return 0; + } + p = dev_alloc_pages(order); if (!p) return -ENOMEM; @@ -3227,8 +3211,13 @@ static void hns3_free_buffer(struct hns3_enet_ring *ring, if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL | DESC_TYPE_SGL_SKB)) napi_consume_skb(cb->priv, budget); - else if (!HNAE3_IS_TX_RING(ring) && cb->pagecnt_bias) - __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); + else if (!HNAE3_IS_TX_RING(ring)) { + if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) + __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); + else if (cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, cb->priv, + false); + } memset(cb, 0, sizeof(*cb)); } @@ -3315,7 +3304,7 @@ static int hns3_alloc_and_map_buffer(struct hns3_enet_ring *ring, int ret; ret = hns3_alloc_buffer(ring, cb); - if (ret) + if (ret || ring->page_pool) goto out; ret = hns3_map_buffer(ring, cb); @@ -3337,7 +3326,8 @@ static int hns3_alloc_and_attach_buffer(struct hns3_enet_ring *ring, int i) if (ret) return ret; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); return 0; } @@ -3367,7 +3357,8 @@ static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i, { hns3_unmap_buffer(ring, &ring->desc_cb[i]); ring->desc_cb[i] = *res_cb; - ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma); + ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + + ring->desc_cb[i].page_offset); ring->desc[i].rx.bd_base_info = 0; } @@ -3539,6 +3530,12 @@ static void hns3_nic_reuse_page(struct sk_buff *skb, int i, u32 frag_size = size - pull_len; bool reused; + if (ring->page_pool) { + skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, + frag_size, truesize); + return; + } + /* Avoid re-using remote or pfmem page */ if (unlikely(!dev_page_is_reusable(desc_cb->priv))) goto out; @@ -3856,6 +3853,9 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, /* We can reuse buffer as-is, just make sure it is reusable */ if (dev_page_is_reusable(desc_cb->priv)) desc_cb->reuse_flag = 1; + else if (desc_cb->type & DESC_TYPE_PP_FRAG) + page_pool_put_full_page(ring->page_pool, 
desc_cb->priv, + false); else /* This page cannot be reused so discard it */ __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); @@ -3863,6 +3863,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length, hns3_rx_ring_move_fw(ring); return 0; } + + if (ring->page_pool) + skb_mark_for_recycle(skb); + u64_stats_update_begin(&ring->syncp); ring->stats.seg_pkt_cnt++; u64_stats_update_end(&ring->syncp); @@ -3901,6 +3905,10 @@ static int hns3_add_frag(struct hns3_enet_ring *ring) "alloc rx fraglist skb fail\n"); return -ENXIO; } + + if (ring->page_pool) + skb_mark_for_recycle(new_skb); + ring->frag_num = 0; if (ring->tail_skb) { @@ -4434,9 +4442,7 @@ static void hns3_tx_dim_work(struct work_struct *work) static void hns3_nic_init_dim(struct hns3_enet_tqp_vector *tqp_vector) { INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); - tqp_vector->rx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); - tqp_vector->tx_group.dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; } static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) @@ -4705,6 +4711,29 @@ static void hns3_put_ring_config(struct hns3_nic_priv *priv) priv->ring = NULL; } +static void hns3_alloc_page_pool(struct hns3_enet_ring *ring) +{ + struct page_pool_params pp_params = { + .flags = PP_FLAG_DMA_MAP | PP_FLAG_PAGE_FRAG | + PP_FLAG_DMA_SYNC_DEV, + .order = hns3_page_order(ring), + .pool_size = ring->desc_num * hns3_buf_size(ring) / + (PAGE_SIZE << hns3_page_order(ring)), + .nid = dev_to_node(ring_to_dev(ring)), + .dev = ring_to_dev(ring), + .dma_dir = DMA_FROM_DEVICE, + .offset = 0, + .max_len = PAGE_SIZE << hns3_page_order(ring), + }; + + ring->page_pool = page_pool_create(&pp_params); + if (IS_ERR(ring->page_pool)) { + dev_warn(ring_to_dev(ring), "page pool creation failed: %ld\n", + PTR_ERR(ring->page_pool)); + ring->page_pool = NULL; + } +} + static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) { int ret; @@ -4724,6 +4753,8 @@ static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring) goto out_with_desc_cb; if (!HNAE3_IS_TX_RING(ring)) { + hns3_alloc_page_pool(ring); + ret = hns3_alloc_ring_buffers(ring); if (ret) goto out_with_desc; @@ -4764,6 +4795,11 @@ void hns3_fini_ring(struct hns3_enet_ring *ring) devm_kfree(ring_to_dev(ring), tx_spare); ring->tx_spare = NULL; } + + if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { + page_pool_destroy(ring->page_pool); + ring->page_pool = NULL; + } } static int hns3_buf_size2type(u32 buf_size) @@ -4954,6 +4990,66 @@ static void hns3_info_show(struct hns3_nic_priv *priv) dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); } +static void hns3_set_cq_period_mode(struct hns3_nic_priv *priv, + enum dim_cq_period_mode mode, bool is_tx) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); + struct hnae3_handle *handle = priv->ae_handle; + int i; + + if (is_tx) { + priv->tx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].tx_group.dim.mode = mode; + } else { + priv->rx_cqe_mode = mode; + + for (i = 0; i < priv->vector_num; i++) + priv->tqp_vector[i].rx_group.dim.mode = mode; + } + + /* only device version above V3(include V3), GL can switch CQ/EQ + * period mode. + */ + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { + u32 new_mode; + u64 reg; + + new_mode = (mode == DIM_CQ_PERIOD_MODE_START_FROM_CQE) ? + HNS3_CQ_MODE_CQE : HNS3_CQ_MODE_EQE; + reg = is_tx ? 
HNS3_GL1_CQ_MODE_REG : HNS3_GL0_CQ_MODE_REG; + + writel(new_mode, handle->kinfo.io_base + reg); + } +} + +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode) +{ + hns3_set_cq_period_mode(priv, tx_mode, true); + hns3_set_cq_period_mode(priv, rx_mode, false); +} + +static void hns3_state_init(struct hnae3_handle *handle) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); + struct net_device *netdev = handle->kinfo.netdev; + struct hns3_nic_priv *priv = netdev_priv(netdev); + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) + set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + + if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) + set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); + + if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) + set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -5021,6 +5117,9 @@ static int hns3_client_init(struct hnae3_handle *handle) goto out_init_ring; } + hns3_cq_period_mode_init(priv, DIM_CQ_PERIOD_MODE_START_FROM_EQE, + DIM_CQ_PERIOD_MODE_START_FROM_EQE); + ret = hns3_init_phy(netdev); if (ret) goto out_init_phy; @@ -5054,16 +5153,7 @@ static int hns3_client_init(struct hnae3_handle *handle) netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); - if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) - set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); - - if (hnae3_ae_dev_rxd_adv_layout_supported(ae_dev)) - set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); - - set_bit(HNS3_NIC_STATE_INITED, &priv->state); - - if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) - set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); + hns3_state_init(handle); ret = register_netdev(netdev); if (ret) { @@ -5353,6 +5443,8 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) if (ret) goto err_uninit_vector; + hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); + /* the device can work without cpu rmap, only aRFS needs it */ ret = hns3_set_rx_cpu_rmap(netdev); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 15af3d93857b..6162d9f88e37 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -6,6 +6,7 @@ #include <linux/dim.h> #include <linux/if_vlan.h> +#include <net/page_pool.h> #include "hnae3.h" @@ -201,6 +202,12 @@ enum hns3_nic_state { #define HNS3_RING_EN_B 0 +#define HNS3_GL0_CQ_MODE_REG 0x20d00 +#define HNS3_GL1_CQ_MODE_REG 0x20d04 +#define HNS3_GL2_CQ_MODE_REG 0x20d08 +#define HNS3_CQ_MODE_EQE 1U +#define HNS3_CQ_MODE_CQE 0U + enum hns3_pkt_l2t_type { HNS3_L2_TYPE_UNICAST, HNS3_L2_TYPE_MULTICAST, @@ -307,6 +314,7 @@ enum hns3_desc_type { DESC_TYPE_BOUNCE_ALL = 1 << 3, DESC_TYPE_BOUNCE_HEAD = 1 << 4, DESC_TYPE_SGL_SKB = 1 << 5, + DESC_TYPE_PP_FRAG = 1 << 6, }; struct hns3_desc_cb { @@ -340,7 +348,7 @@ enum hns3_pkt_l3type { HNS3_L3_TYPE_LLDP, HNS3_L3_TYPE_BPDU, HNS3_L3_TYPE_MAC_PAUSE, - HNS3_L3_TYPE_PFC_PAUSE,/* 0x9*/ + HNS3_L3_TYPE_PFC_PAUSE, /* 0x9 */ /* reserved for 0xA~0xB */ @@ -384,11 +392,11 @@ enum hns3_pkt_ol4type { }; struct hns3_rx_ptype { - u32 ptype:8; - u32 csum_level:2; - u32 ip_summed:2; - u32 l3_type:4; - u32 valid:1; + u32 ptype : 8; + u32 csum_level : 2; + u32 
ip_summed : 2; + u32 l3_type : 4; + u32 valid : 1; }; struct ring_stats { @@ -451,6 +459,7 @@ struct hns3_enet_ring { struct hnae3_queue *tqp; int queue_index; struct device *dev; /* will be used for DMA mapping of descriptors */ + struct page_pool *page_pool; /* statistic */ struct ring_stats stats; @@ -513,9 +522,9 @@ struct hns3_enet_coalesce { u16 int_gl; u16 int_ql; u16 int_ql_max; - u8 adapt_enable:1; - u8 ql_enable:1; - u8 unit_1us:1; + u8 adapt_enable : 1; + u8 ql_enable : 1; + u8 unit_1us : 1; enum hns3_flow_level_range flow_level; }; @@ -569,6 +578,8 @@ struct hns3_nic_priv { unsigned long state; + enum dim_cq_period_mode tx_cqe_mode; + enum dim_cq_period_mode rx_cqe_mode; struct hns3_enet_coalesce tx_coal; struct hns3_enet_coalesce rx_coal; u32 tx_copybreak; @@ -593,6 +604,11 @@ struct hns3_hw_error_info { const char *msg; }; +struct hns3_reset_type_map { + enum ethtool_reset_flags rst_flags; + enum hnae3_reset_type rst_type; +}; + static inline int ring_space(struct hns3_enet_ring *ring) { /* This smp_load_acquire() pairs with smp_store_release() in @@ -702,4 +718,7 @@ void hns3_dbg_register_debugfs(const char *debugfs_dir_name); void hns3_dbg_unregister_debugfs(void); void hns3_shinfo_pack(struct skb_shared_info *shinfo, __u32 *size); u16 hns3_get_max_available_channels(struct hnae3_handle *h); +void hns3_cq_period_mode_init(struct hns3_nic_priv *priv, + enum dim_cq_period_mode tx_mode, + enum dim_cq_period_mode rx_mode); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index 82061ab6930f..7ea511d59e91 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -7,21 +7,7 @@ #include <linux/sfp.h> #include "hns3_enet.h" - -struct hns3_stats { - char stats_string[ETH_GSTRING_LEN]; - int stats_offset; -}; - -struct hns3_sfp_type { - u8 type; - u8 ext_type; -}; - -struct hns3_pflag_desc { - char name[ETH_GSTRING_LEN]; - void (*handler)(struct net_device *netdev, bool enable); -}; +#include "hns3_ethtool.h" /* tqp related stats */ #define HNS3_TQP_STAT(_string, _member) { \ @@ -312,33 +298,8 @@ out: return ret_val; } -/** - * hns3_self_test - self test - * @ndev: net device - * @eth_test: test cmd - * @data: test result - */ -static void hns3_self_test(struct net_device *ndev, - struct ethtool_test *eth_test, u64 *data) +static void hns3_set_selftest_param(struct hnae3_handle *h, int (*st_param)[2]) { - struct hns3_nic_priv *priv = netdev_priv(ndev); - struct hnae3_handle *h = priv->ae_handle; - int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; - bool if_running = netif_running(ndev); - int test_index = 0; - u32 i; - - if (hns3_nic_resetting(ndev)) { - netdev_err(ndev, "dev resetting!"); - return; - } - - /* Only do offline selftest, or pass by default */ - if (eth_test->flags != ETH_TEST_FL_OFFLINE) - return; - - netif_dbg(h, drv, ndev, "self test start"); - st_param[HNAE3_LOOP_APP][0] = HNAE3_LOOP_APP; st_param[HNAE3_LOOP_APP][1] = h->flags & HNAE3_SUPPORT_APP_LOOPBACK; @@ -355,6 +316,18 @@ static void hns3_self_test(struct net_device *ndev, st_param[HNAE3_LOOP_PHY][0] = HNAE3_LOOP_PHY; st_param[HNAE3_LOOP_PHY][1] = h->flags & HNAE3_SUPPORT_PHY_LOOPBACK; +} + +static void hns3_selftest_prepare(struct net_device *ndev, + bool if_running, int (*st_param)[2]) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test start\n"); + + 
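	/* Illustrative summary, not part of the patch: the old monolithic
	 * hns3_self_test() is now split into prepare/do/restore helpers.
	 * hns3_set_selftest_param() fills st_param[i][0] with the
	 * hnae3_loop type and st_param[i][1] with whether that loopback is
	 * supported (derived from h->flags); hns3_do_selftest() walks that
	 * table and runs only the supported entries, and
	 * hns3_selftest_restore() undoes everything done here once the
	 * tests have finished.
	 */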
hns3_set_selftest_param(h, st_param); if (if_running) ndev->netdev_ops->ndo_stop(ndev); @@ -373,6 +346,35 @@ static void hns3_self_test(struct net_device *ndev, h->ae_algo->ops->halt_autoneg(h, true); set_bit(HNS3_NIC_STATE_TESTING, &priv->state); +} + +static void hns3_selftest_restore(struct net_device *ndev, bool if_running) +{ + struct hns3_nic_priv *priv = netdev_priv(ndev); + struct hnae3_handle *h = priv->ae_handle; + + clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); + + if (h->ae_algo->ops->halt_autoneg) + h->ae_algo->ops->halt_autoneg(h, false); + +#if IS_ENABLED(CONFIG_VLAN_8021Q) + if (h->ae_algo->ops->enable_vlan_filter) + h->ae_algo->ops->enable_vlan_filter(h, true); +#endif + + if (if_running) + ndev->netdev_ops->ndo_open(ndev); + + if (netif_msg_ifdown(h)) + netdev_info(ndev, "self test end\n"); +} + +static void hns3_do_selftest(struct net_device *ndev, int (*st_param)[2], + struct ethtool_test *eth_test, u64 *data) +{ + int test_index = 0; + u32 i; for (i = 0; i < HNS3_SELF_TEST_TYPE_NUM; i++) { enum hnae3_loop loop_type = (enum hnae3_loop)st_param[i][0]; @@ -391,21 +393,32 @@ static void hns3_self_test(struct net_device *ndev, test_index++; } +} - clear_bit(HNS3_NIC_STATE_TESTING, &priv->state); - - if (h->ae_algo->ops->halt_autoneg) - h->ae_algo->ops->halt_autoneg(h, false); +/** + * hns3_nic_self_test - self test + * @ndev: net device + * @eth_test: test cmd + * @data: test result + */ +static void hns3_self_test(struct net_device *ndev, + struct ethtool_test *eth_test, u64 *data) +{ + int st_param[HNS3_SELF_TEST_TYPE_NUM][2]; + bool if_running = netif_running(ndev); -#if IS_ENABLED(CONFIG_VLAN_8021Q) - if (h->ae_algo->ops->enable_vlan_filter) - h->ae_algo->ops->enable_vlan_filter(h, true); -#endif + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } - if (if_running) - ndev->netdev_ops->ndo_open(ndev); + /* Only do offline selftest, or pass by default */ + if (eth_test->flags != ETH_TEST_FL_OFFLINE) + return; - netif_dbg(h, drv, ndev, "self test end\n"); + hns3_selftest_prepare(ndev, if_running, st_param); + hns3_do_selftest(ndev, st_param, eth_test, data); + hns3_selftest_restore(ndev, if_running); } static void hns3_update_limit_promisc_mode(struct net_device *netdev, @@ -953,6 +966,60 @@ static int hns3_get_rxnfc(struct net_device *netdev, } } +static const struct hns3_reset_type_map hns3_reset_type[] = { + {ETH_RESET_MGMT, HNAE3_IMP_RESET}, + {ETH_RESET_ALL, HNAE3_GLOBAL_RESET}, + {ETH_RESET_DEDICATED, HNAE3_FUNC_RESET}, +}; + +static const struct hns3_reset_type_map hns3vf_reset_type[] = { + {ETH_RESET_DEDICATED, HNAE3_VF_FUNC_RESET}, +}; + +static int hns3_set_reset(struct net_device *netdev, u32 *flags) +{ + enum hnae3_reset_type rst_type = HNAE3_NONE_RESET; + struct hnae3_handle *h = hns3_get_handle(netdev); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(h->pdev); + const struct hnae3_ae_ops *ops = h->ae_algo->ops; + const struct hns3_reset_type_map *rst_type_map; + u32 i, size; + + if (ops->ae_dev_resetting && ops->ae_dev_resetting(h)) + return -EBUSY; + + if (!ops->set_default_reset_request || !ops->reset_event) + return -EOPNOTSUPP; + + if (h->flags & HNAE3_SUPPORT_VF) { + rst_type_map = hns3vf_reset_type; + size = ARRAY_SIZE(hns3vf_reset_type); + } else { + rst_type_map = hns3_reset_type; + size = ARRAY_SIZE(hns3_reset_type); + } + + for (i = 0; i < size; i++) { + if (rst_type_map[i].rst_flags == *flags) { + rst_type = rst_type_map[i].rst_type; + break; + } + } + + if (rst_type == HNAE3_NONE_RESET || + (rst_type == 
HNAE3_IMP_RESET && + ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2)) + return -EOPNOTSUPP; + + netdev_info(netdev, "Setting reset type %d\n", rst_type); + + ops->set_default_reset_request(ae_dev, rst_type); + + ops->reset_event(h->pdev, h); + + return 0; +} + static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv, u32 tx_desc_num, u32 rx_desc_num) { @@ -1139,7 +1206,9 @@ static void hns3_get_channels(struct net_device *netdev, } static int hns3_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; @@ -1161,6 +1230,11 @@ static int hns3_get_coalesce(struct net_device *netdev, cmd->tx_max_coalesced_frames = tx_coal->int_ql; cmd->rx_max_coalesced_frames = rx_coal->int_ql; + kernel_coal->use_cqe_mode_tx = (priv->tx_cqe_mode == + DIM_CQ_PERIOD_MODE_START_FROM_CQE); + kernel_coal->use_cqe_mode_rx = (priv->rx_cqe_mode == + DIM_CQ_PERIOD_MODE_START_FROM_CQE); + return 0; } @@ -1321,13 +1395,17 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev, } static int hns3_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct hnae3_handle *h = hns3_get_handle(netdev); struct hns3_nic_priv *priv = netdev_priv(netdev); struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; u16 queue_num = h->kinfo.num_tqps; + enum dim_cq_period_mode tx_mode; + enum dim_cq_period_mode rx_mode; int ret; int i; @@ -1353,6 +1431,14 @@ static int hns3_set_coalesce(struct net_device *netdev, for (i = 0; i < queue_num; i++) hns3_set_coalesce_per_queue(netdev, cmd, i); + tx_mode = kernel_coal->use_cqe_mode_tx ? + DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + rx_mode = kernel_coal->use_cqe_mode_rx ? 
+ DIM_CQ_PERIOD_MODE_START_FROM_CQE : + DIM_CQ_PERIOD_MODE_START_FROM_EQE; + hns3_cq_period_mode_init(priv, tx_mode, rx_mode); + return 0; } @@ -1658,7 +1744,8 @@ static int hns3_set_tunable(struct net_device *netdev, ETHTOOL_COALESCE_USE_ADAPTIVE | \ ETHTOOL_COALESCE_RX_USECS_HIGH | \ ETHTOOL_COALESCE_TX_USECS_HIGH | \ - ETHTOOL_COALESCE_MAX_FRAMES) + ETHTOOL_COALESCE_MAX_FRAMES | \ + ETHTOOL_COALESCE_USE_CQE) static int hns3_get_ts_info(struct net_device *netdev, struct ethtool_ts_info *info) @@ -1671,6 +1758,71 @@ static int hns3_get_ts_info(struct net_device *netdev, return ethtool_op_get_ts_info(netdev, info); } +static const struct hns3_ethtool_link_ext_state_mapping +hns3_link_ext_state_map[] = { + {1, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_NO_HCD}, + {2, ETHTOOL_LINK_EXT_STATE_AUTONEG, + ETHTOOL_LINK_EXT_SUBSTATE_AN_ACK_NOT_RECEIVED}, + + {256, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_INHIBIT_TIMEOUT}, + {257, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_KR_LINK_PARTNER_DID_NOT_SET_RECEIVER_READY}, + {512, ETHTOOL_LINK_EXT_STATE_LINK_TRAINING_FAILURE, + ETHTOOL_LINK_EXT_SUBSTATE_LT_REMOTE_FAULT}, + + {513, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_PCS_DID_NOT_ACQUIRE_BLOCK_LOCK}, + {514, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_FC_FEC_IS_NOT_LOCKED}, + {515, ETHTOOL_LINK_EXT_STATE_LINK_LOGICAL_MISMATCH, + ETHTOOL_LINK_EXT_SUBSTATE_LLM_RS_FEC_IS_NOT_LOCKED}, + + {768, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_LARGE_NUMBER_OF_PHYSICAL_ERRORS}, + {769, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_REFERENCE_CLOCK_LOST}, + {770, ETHTOOL_LINK_EXT_STATE_BAD_SIGNAL_INTEGRITY, + ETHTOOL_LINK_EXT_SUBSTATE_BSI_SERDES_ALOS}, + + {1024, ETHTOOL_LINK_EXT_STATE_NO_CABLE, 0}, + {1025, ETHTOOL_LINK_EXT_STATE_CABLE_ISSUE, + ETHTOOL_LINK_EXT_SUBSTATE_CI_UNSUPPORTED_CABLE}, + + {1026, ETHTOOL_LINK_EXT_STATE_EEPROM_ISSUE, 0}, +}; + +static int hns3_get_link_ext_state(struct net_device *netdev, + struct ethtool_link_ext_state_info *info) +{ + const struct hns3_ethtool_link_ext_state_mapping *map; + struct hnae3_handle *h = hns3_get_handle(netdev); + u32 status_code, i; + int ret; + + if (netif_carrier_ok(netdev)) + return -ENODATA; + + if (!h->ae_algo->ops->get_link_diagnosis_info) + return -EOPNOTSUPP; + + ret = h->ae_algo->ops->get_link_diagnosis_info(h, &status_code); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(hns3_link_ext_state_map); i++) { + map = &hns3_link_ext_state_map[i]; + if (map->status_code == status_code) { + info->link_ext_state = map->link_ext_state; + info->__link_ext_substate = map->link_ext_substate; + return 0; + } + } + + return -ENODATA; +} + static const struct ethtool_ops hns3vf_ethtool_ops = { .supported_coalesce_params = HNS3_ETHTOOL_COALESCE, .get_drvinfo = hns3_get_drvinfo, @@ -1699,6 +1851,7 @@ static const struct ethtool_ops hns3vf_ethtool_ops = { .set_priv_flags = hns3_set_priv_flags, .get_tunable = hns3_get_tunable, .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, }; static const struct ethtool_ops hns3_ethtool_ops = { @@ -1740,6 +1893,8 @@ static const struct ethtool_ops hns3_ethtool_ops = { .get_ts_info = hns3_get_ts_info, .get_tunable = hns3_get_tunable, .set_tunable = hns3_set_tunable, + .reset = hns3_set_reset, + .get_link_ext_state = hns3_get_link_ext_state, }; void hns3_ethtool_set_ops(struct 
net_device *netdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h new file mode 100644 index 000000000000..822d6fcbc73b --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright (c) 2021 Hisilicon Limited. + +#ifndef __HNS3_ETHTOOL_H +#define __HNS3_ETHTOOL_H + +#include <linux/ethtool.h> +#include <linux/netdevice.h> + +struct hns3_stats { + char stats_string[ETH_GSTRING_LEN]; + int stats_offset; +}; + +struct hns3_sfp_type { + u8 type; + u8 ext_type; +}; + +struct hns3_pflag_desc { + char name[ETH_GSTRING_LEN]; + void (*handler)(struct net_device *netdev, bool enable); +}; + +struct hns3_ethtool_link_ext_state_mapping { + u32 status_code; + enum ethtool_link_ext_state link_ext_state; + u8 link_ext_substate; +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index a685392dbfe9..d1bf5c4c0abb 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -7,6 +7,6 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge_ptp.o hclge_devlink.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 887297e37cf3..474c6d1664e7 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -169,17 +169,19 @@ static bool hclge_is_special_opcode(u16 opcode) /* these commands have several descriptors, * and use the first one to save opcode and return value */ - u16 spec_opcode[] = {HCLGE_OPC_STATS_64_BIT, - HCLGE_OPC_STATS_32_BIT, - HCLGE_OPC_STATS_MAC, - HCLGE_OPC_STATS_MAC_ALL, - HCLGE_OPC_QUERY_32_BIT_REG, - HCLGE_OPC_QUERY_64_BIT_REG, - HCLGE_QUERY_CLEAR_MPF_RAS_INT, - HCLGE_QUERY_CLEAR_PF_RAS_INT, - HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, - HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, - HCLGE_QUERY_ALL_ERR_INFO}; + static const u16 spec_opcode[] = { + HCLGE_OPC_STATS_64_BIT, + HCLGE_OPC_STATS_32_BIT, + HCLGE_OPC_STATS_MAC, + HCLGE_OPC_STATS_MAC_ALL, + HCLGE_OPC_QUERY_32_BIT_REG, + HCLGE_OPC_QUERY_64_BIT_REG, + HCLGE_QUERY_CLEAR_MPF_RAS_INT, + HCLGE_QUERY_CLEAR_PF_RAS_INT, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + HCLGE_QUERY_ALL_ERR_INFO + }; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -360,41 +362,34 @@ static void hclge_set_default_capability(struct hclge_dev *hdev) } } +const struct hclge_caps_bit_map hclge_cmd_caps_bit_map0[] = { + {HCLGE_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGE_CAP_PTP_B, HNAE3_DEV_SUPPORT_PTP_B}, + {HCLGE_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGE_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGE_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGE_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGE_CAP_FD_FORWARD_TC_B, HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B}, + {HCLGE_CAP_FEC_B, HNAE3_DEV_SUPPORT_FEC_B}, + {HCLGE_CAP_PAUSE_B, HNAE3_DEV_SUPPORT_PAUSE_B}, + {HCLGE_CAP_PHY_IMP_B, 
HNAE3_DEV_SUPPORT_PHY_IMP_B}, + {HCLGE_CAP_RAS_IMP_B, HNAE3_DEV_SUPPORT_RAS_IMP_B}, + {HCLGE_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, + {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B}, + {HCLGE_CAP_PORT_VLAN_BYPASS_B, HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B}, +}; + static void hclge_parse_capability(struct hclge_dev *hdev, struct hclge_query_version_cmd *cmd) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps; + u32 caps, i; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_GSO_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PTP_B)) - set_bit(HNAE3_DEV_SUPPORT_PTP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_INT_QL_B)) - set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_TQP_TXRX_INDEP_B)) - set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_HW_TX_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_UDP_TUNNEL_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_FD_FORWARD_TC_B)) - set_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_FEC_B)) - set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PAUSE_B)) - set_bit(HNAE3_DEV_SUPPORT_PAUSE_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PHY_IMP_B)) - set_bit(HNAE3_DEV_SUPPORT_PHY_IMP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_RAS_IMP_B)) - set_bit(HNAE3_DEV_SUPPORT_RAS_IMP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_RXD_ADV_LAYOUT_B)) - set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGE_CAP_PORT_VLAN_BYPASS_B)) { - set_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps); - set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); - } + for (i = 0; i < ARRAY_SIZE(hclge_cmd_caps_bit_map0); i++) + if (hnae3_get_bit(caps, hclge_cmd_caps_bit_map0[i].imp_bit)) + set_bit(hclge_cmd_caps_bit_map0[i].local_bit, + ae_dev->caps); } static __le32 hclge_build_api_caps(void) @@ -573,9 +568,13 @@ static void hclge_cmd_uninit_regs(struct hclge_hw *hw) void hclge_cmd_uninit(struct hclge_dev *hdev) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. 
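The ordering in this uninit path matters: the patch sets HCLGE_STATE_CMD_DISABLE and waits HCLGE_CMDQ_CLEAR_WAIT_TIME (200 ms) before taking the CSQ/CRQ locks and clearing the queue registers, so commands already handed to the firmware can drain first. A rough user-space sketch of that quiesce ordering, using pthread mutexes and usleep() purely as stand-ins for the driver's spinlocks and msleep():

#include <pthread.h>
#include <stdatomic.h>
#include <unistd.h>

static atomic_bool cmdq_disabled;
static pthread_mutex_t csq_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t crq_lock = PTHREAD_MUTEX_INITIALIZER;

static void cmdq_uninit(void)
{
	/* 1. refuse any new submissions */
	atomic_store(&cmdq_disabled, true);
	/* 2. give already-queued commands time to complete */
	usleep(200 * 1000);
	/* 3. only now tear the queues down under the locks */
	pthread_mutex_lock(&csq_lock);
	pthread_mutex_lock(&crq_lock);
	/* clear base/head/tail registers, free descriptors, ... */
	pthread_mutex_unlock(&crq_lock);
	pthread_mutex_unlock(&csq_lock);
}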
+ */ + msleep(HCLGE_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); hclge_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 18bde77ef944..33244472e0d0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -9,6 +9,7 @@ #include "hnae3.h" #define HCLGE_CMDQ_TX_TIMEOUT 30000 +#define HCLGE_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGE_DESC_DATA_LEN 6 struct hclge_dev; @@ -270,6 +271,9 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* clear hardware resource command */ + HCLGE_OPC_CLEAR_HW_RESOURCE = 0x700B, + /* NCL config command */ HCLGE_OPC_QUERY_NCL_CONFIG = 0x7011, @@ -316,6 +320,9 @@ enum hclge_opcode_type { /* PHY command */ HCLGE_OPC_PHY_LINK_KSETTING = 0x7025, HCLGE_OPC_PHY_REG = 0x7026, + + /* Query link diagnosis info command */ + HCLGE_OPC_QUERY_LINK_DIAGNOSIS = 0x702A, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -446,7 +453,7 @@ struct hclge_tc_thrd { }; struct hclge_priv_buf { - struct hclge_waterline wl; /* Waterline for low and high*/ + struct hclge_waterline wl; /* Waterline for low and high */ u32 buf_size; /* TC private buffer size */ u32 tx_buf_size; u32 enable; /* Enable TC private buffer or not */ @@ -1010,16 +1017,6 @@ struct hclge_common_lb_cmd { #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 -#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701c -#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 /* this bit indicates that the driver is ready for hardware reset */ #define HCLGE_NIC_SW_RST_RDY_B 16 @@ -1194,6 +1191,19 @@ struct hclge_dev_specs_1_cmd { u8 rsv1[18]; }; +/* mac speed type defined in firmware command */ +enum HCLGE_FIRMWARE_MAC_SPEED { + HCLGE_FW_MAC_SPEED_1G, + HCLGE_FW_MAC_SPEED_10G, + HCLGE_FW_MAC_SPEED_25G, + HCLGE_FW_MAC_SPEED_40G, + HCLGE_FW_MAC_SPEED_50G, + HCLGE_FW_MAC_SPEED_100G, + HCLGE_FW_MAC_SPEED_10M, + HCLGE_FW_MAC_SPEED_100M, + HCLGE_FW_MAC_SPEED_200G, +}; + #define HCLGE_PHY_LINK_SETTING_BD_NUM 2 struct hclge_phy_link_ksetting_0_cmd { @@ -1224,6 +1234,12 @@ struct hclge_phy_reg_cmd { u8 rsv1[18]; }; +/* capabilities bits map between imp firmware and local driver */ +struct hclge_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + int hclge_cmd_init(struct hclge_dev *hdev); static inline void hclge_write_reg(void __iomem *base, u32 reg, u32 value) { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 5bf5db91d16c..4a619e5d3f35 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -104,26 +104,30 @@ static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, return 0; } -static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, - u8 *tc, bool *changed) +static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets, + bool *changed) { - bool 
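This hunk splits the old hclge_ets_validate() into hclge_ets_tc_changed(), which derives the TC count, and hclge_ets_sch_mode_validate(), which checks scheduler modes and bandwidth. The derived count is simply the highest TC id referenced in prio_tc[] plus one, as in this small worked example with made-up prio_tc contents:

#include <stdio.h>

#define MAX_USER_PRIO 8

int main(void)
{
	/* user priority -> traffic class, as in struct ieee_ets::prio_tc */
	unsigned char prio_tc[MAX_USER_PRIO] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	unsigned char max_tc_id = 0;
	int i;

	for (i = 0; i < MAX_USER_PRIO; i++)
		if (prio_tc[i] > max_tc_id)
			max_tc_id = prio_tc[i];

	/* TC ids are zero-based, so the number of TCs in use is max id + 1 */
	printf("tc_num = %d\n", max_tc_id + 1);	/* prints "tc_num = 4" */
	return 0;
}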
has_ets_tc = false; - u32 total_ets_bw = 0; - u8 max_tc = 0; - int ret; + u8 max_tc_id = 0; u8 i; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) *changed = true; - if (ets->prio_tc[i] > max_tc) - max_tc = ets->prio_tc[i]; + if (ets->prio_tc[i] > max_tc_id) + max_tc_id = ets->prio_tc[i]; } - ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc); - if (ret) - return ret; + /* return max tc number, max tc id need to plus 1 */ + return max_tc_id + 1; +} + +static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, + struct ieee_ets *ets, bool *changed) +{ + bool has_ets_tc = false; + u32 total_ets_bw = 0; + u8 i; for (i = 0; i < hdev->tc_max; i++) { switch (ets->tc_tsa[i]) { @@ -148,7 +152,26 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, if (has_ets_tc && total_ets_bw != BW_PERCENT) return -EINVAL; - *tc = max_tc + 1; + return 0; +} + +static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, + u8 *tc, bool *changed) +{ + u8 tc_num; + int ret; + + tc_num = hclge_ets_tc_changed(hdev, ets, changed); + + ret = hclge_dcb_common_validate(hdev, tc_num, ets->prio_tc); + if (ret) + return ret; + + ret = hclge_ets_sch_mode_validate(hdev, ets, changed); + if (ret) + return ret; + + *tc = tc_num; if (*tc != hdev->tm_info.num_tc) *changed = true; @@ -234,9 +257,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) goto err_out; - ret = hclge_notify_init_up(hdev); - if (ret) - return ret; + return hclge_notify_init_up(hdev); } return hclge_tm_dwrr_cfg(hdev); @@ -255,21 +276,12 @@ static int hclge_ieee_getpfc(struct hnae3_handle *h, struct ieee_pfc *pfc) u64 requests[HNAE3_MAX_TC], indications[HNAE3_MAX_TC]; struct hclge_vport *vport = hclge_get_vport(h); struct hclge_dev *hdev = vport->back; - u8 i, j, pfc_map, *prio_tc; int ret; + u8 i; memset(pfc, 0, sizeof(*pfc)); pfc->pfc_cap = hdev->pfc_max; - prio_tc = hdev->tm_info.prio_tc; - pfc_map = hdev->tm_info.hw_pfc_map; - - /* Pfc setting is based on TC */ - for (i = 0; i < hdev->tm_info.num_tc; i++) { - for (j = 0; j < HNAE3_MAX_USER_PRIO; j++) { - if ((prio_tc[j] == i) && (pfc_map & BIT(i))) - pfc->pfc_en |= BIT(j); - } - } + pfc->pfc_en = hdev->tm_info.pfc_en; ret = hclge_pfc_tx_stats_get(hdev, requests); if (ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 288788186ecc..68ed1715ac52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -926,26 +926,45 @@ static int hclge_dbg_dump_tm_nodes(struct hclge_dev *hdev, char *buf, int len) return 0; } +static const struct hclge_dbg_item tm_pri_items[] = { + { "ID", 4 }, + { "MODE", 2 }, + { "DWRR", 2 }, + { "C_IR_B", 2 }, + { "C_IR_U", 2 }, + { "C_IR_S", 2 }, + { "C_BS_B", 2 }, + { "C_BS_S", 2 }, + { "C_FLAG", 2 }, + { "C_RATE(Mbps)", 2 }, + { "P_IR_B", 2 }, + { "P_IR_U", 2 }, + { "P_IR_S", 2 }, + { "P_BS_B", 2 }, + { "P_BS_S", 2 }, + { "P_FLAG", 2 }, + { "P_RATE(Mbps)", 0 } +}; + static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) { - struct hclge_tm_shaper_para c_shaper_para; - struct hclge_tm_shaper_para p_shaper_para; - u8 pri_num, sch_mode, weight; - char *sch_mode_str; - int pos = 0; - int ret; - u8 i; + char data_str[ARRAY_SIZE(tm_pri_items)][HCLGE_DBG_DATA_STR_LEN]; + struct hclge_tm_shaper_para c_shaper_para, p_shaper_para; + char 
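In the rewritten hclge_dbg_dump_tm_pri(), the tm_pri_items[] table drives both the header and every data row through hclge_dbg_fill_content(), instead of hand-counting spaces in scnprintf() format strings. A stand-alone sketch of that width-driven formatting idea, using illustrative names (dbg_item, fill_row) rather than the driver's helpers:

#include <stdio.h>
#include <string.h>

/* Each entry is a column title plus the extra padding that follows it. */
struct dbg_item {
	const char *name;
	int pad;
};

static const struct dbg_item items[] = {
	{ "ID", 4 }, { "MODE", 2 }, { "DWRR", 2 },
};

/* Render one row: the header when vals is NULL, otherwise a data row.
 * Every field is left-aligned to strlen(title) + pad so columns line up.
 */
static void fill_row(char *buf, size_t len, const char **vals)
{
	size_t pos = 0;
	size_t i;

	buf[0] = '\0';
	for (i = 0; i < sizeof(items) / sizeof(items[0]); i++) {
		if (pos >= len)
			break;
		pos += snprintf(buf + pos, len - pos, "%-*s",
				(int)strlen(items[i].name) + items[i].pad,
				vals ? vals[i] : items[i].name);
	}
}

int main(void)
{
	const char *row[] = { "0001", "dwrr", "100" };
	char line[64];

	fill_row(line, sizeof(line), NULL);	/* "ID    MODE  DWRR  " */
	puts(line);
	fill_row(line, sizeof(line), row);	/* "0001  dwrr  100   " */
	puts(line);
	return 0;
}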
*result[ARRAY_SIZE(tm_pri_items)], *sch_mode_str; + char content[HCLGE_DBG_TM_INFO_LEN]; + u8 pri_num, sch_mode, weight, i, j; + int pos, ret; ret = hclge_tm_get_pri_num(hdev, &pri_num); if (ret) return ret; - pos += scnprintf(buf + pos, len - pos, - "ID MODE DWRR C_IR_B C_IR_U C_IR_S C_BS_B "); - pos += scnprintf(buf + pos, len - pos, - "C_BS_S C_FLAG C_RATE(Mbps) P_IR_B P_IR_U "); - pos += scnprintf(buf + pos, len - pos, - "P_IR_S P_BS_B P_BS_S P_FLAG P_RATE(Mbps)\n"); + for (i = 0; i < ARRAY_SIZE(tm_pri_items); i++) + result[i] = &data_str[i][0]; + + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + NULL, ARRAY_SIZE(tm_pri_items)); + pos = scnprintf(buf, len, "%s", content); for (i = 0; i < pri_num; i++) { ret = hclge_tm_get_pri_sch_mode(hdev, i, &sch_mode); @@ -971,21 +990,16 @@ static int hclge_dbg_dump_tm_pri(struct hclge_dev *hdev, char *buf, int len) sch_mode_str = sch_mode & HCLGE_TM_TX_SCHD_DWRR_MSK ? "dwrr" : "sp"; - pos += scnprintf(buf + pos, len - pos, - "%04u %4s %3u %3u %3u %3u ", - i, sch_mode_str, weight, c_shaper_para.ir_b, - c_shaper_para.ir_u, c_shaper_para.ir_s); - pos += scnprintf(buf + pos, len - pos, - "%3u %3u %1u %6u ", - c_shaper_para.bs_b, c_shaper_para.bs_s, - c_shaper_para.flag, c_shaper_para.rate); - pos += scnprintf(buf + pos, len - pos, - "%3u %3u %3u %3u %3u ", - p_shaper_para.ir_b, p_shaper_para.ir_u, - p_shaper_para.ir_s, p_shaper_para.bs_b, - p_shaper_para.bs_s); - pos += scnprintf(buf + pos, len - pos, "%1u %6u\n", - p_shaper_para.flag, p_shaper_para.rate); + j = 0; + sprintf(result[j++], "%04u", i); + sprintf(result[j++], "%4s", sch_mode_str); + sprintf(result[j++], "%3u", weight); + hclge_dbg_fill_shaper_content(&c_shaper_para, result, &j); + hclge_dbg_fill_shaper_content(&p_shaper_para, result, &j); + hclge_dbg_fill_content(content, sizeof(content), tm_pri_items, + (const char **)result, + ARRAY_SIZE(tm_pri_items)); + pos += scnprintf(buf + pos, len - pos, "%s", content); } return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c new file mode 100644 index 000000000000..e4aad695abcc --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.c @@ -0,0 +1,148 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. 
*/ + +#include <net/devlink.h> + +#include "hclge_devlink.h" + +static int hclge_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGE_DEVLINK_FW_STRING_LEN 32 + struct hclge_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGE_DEVLINK_FW_STRING_LEN]; + struct hclge_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclge_devlink_reload_down(struct devlink *devlink, bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclge_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclge_devlink_priv *priv = devlink_priv(devlink); + struct hclge_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->vport->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclge_devlink_ops = { + .info_get = hclge_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclge_devlink_reload_down, + .reload_up = hclge_devlink_reload_up, +}; + +int hclge_devlink_init(struct hclge_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclge_devlink_priv *priv; + struct devlink *devlink; + int ret; + + devlink = devlink_alloc(&hclge_devlink_ops, + sizeof(struct hclge_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + ret = devlink_register(devlink); + if (ret) { + dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", + ret); + goto out_reg_fail; + } + + devlink_reload_enable(devlink); + + return 0; + +out_reg_fail: + devlink_free(devlink); + return ret; +} + +void hclge_devlink_uninit(struct hclge_dev *hdev) +{ + 
struct devlink *devlink = hdev->devlink; + + devlink_reload_disable(devlink); + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h new file mode 100644 index 000000000000..918be04507a5 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGE_DEVLINK_H +#define __HCLGE_DEVLINK_H + +#include "hclge_main.h" + +struct hclge_devlink_priv { + struct hclge_dev *hdev; +}; + +int hclge_devlink_init(struct hclge_dev *hdev); +void hclge_devlink_uninit(struct hclge_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index ec9a7f8bc3fe..718c16d686fa 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,468 +4,895 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "imp_itcm0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "imp_itcm1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "imp_itcm2_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "imp_itcm3_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "imp_dtcm0_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "imp_dtcm0_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "imp_dtcm1_mem0_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "imp_dtcm1_mem1_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "imp_itcm4_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = 
BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "cmdq_nic_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "cmdq_nic_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cmdq_nic_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "cmdq_nic_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "cmdq_nic_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "cmdq_nic_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(13), + .msg = "cmdq_nic_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(15), + .msg = "cmdq_nic_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(17), + .msg = "cmdq_rocee_rx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(19), + .msg = "cmdq_rocee_tx_depth_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(21), + .msg = "cmdq_rocee_rx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(23), + .msg = "cmdq_rocee_tx_tail_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(25), + .msg = "cmdq_rocee_rx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(27), + .msg = "cmdq_rocee_tx_head_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(29), + .msg = "cmdq_rocee_rx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "cmdq_rocee_tx_addr_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(9), .msg = "tqp_int_ctrl_odd_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "tx_que_scan_int_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(11), .msg = "rx_que_scan_int_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + 
.int_msk = BIT(6), + .msg = "tqp_int_cfg_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "tqp_int_cfg_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(8), + .msg = "tqp_int_ctrl_even_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(9), + .msg = "tqp_int_ctrl_odd_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "tx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(11), + .msg = "rx_que_scan_int_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { - { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "msix_nic_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "msix_rocee_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_igu_int[] = { - { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "igu_rx_buf0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_rx_buf1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { - { .int_msk = BIT(0), .msg = "rx_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "rx_stp_fifo_underflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tx_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tx_buf_underrun", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "rx_stp_buf_overflow", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "rx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rx_stp_fifo_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "rx_stp_fifo_underflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tx_buf_underrun", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "rx_stp_buf_overflow", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "ncsi_tx_ecc_mbit_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), 
.msg = "umv_key_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_m1bit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "flow_director_ad_mem0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "flow_director_ad_mem1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "rx_vlan_tag_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "vf_vlan_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "umv_mcast_group_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "umv_key_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "umv_key_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "umv_key_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "umv_key_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "umv_ad_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + 
.int_msk = BIT(7), + .msg = "rss_tc_mode_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rss_idt_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "rss_idt_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "rss_idt_mem2_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "rss_idt_mem3_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "rss_idt_mem4_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "rss_idt_mem5_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rss_idt_mem6_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rss_idt_mem7_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rss_idt_mem8_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rss_idt_mem9_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rss_idt_mem10_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rss_idt_mem11_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rss_idt_mem12_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rss_idt_mem13_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rss_idt_mem14_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rss_idt_mem15_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "port_vlan_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "mcast_linear_table_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "mcast_result_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "flow_director_ad_mem0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "flow_director_ad_mem1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_vlan_tag_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "Tx_UP_mapping_config_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "tx_vlan_tag_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "tx_vlan_tag_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "rss_list_tc_unassigned_queue_err", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), 
.msg = "FD_CN1_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "hfs_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "rslt_descr_fifo_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tx_vlan_tag_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "FD_CN0_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "FD_CN1_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "GRO_AD_memory_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_tm_sch_rint[] = { - { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "tm_sch_port_shap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "tm_sch_port_shap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = 
"tm_sch_roce_up_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "tm_sch_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "tm_sch_port_shap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "tm_sch_port_shap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tm_sch_pg_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tm_sch_pg_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "tm_sch_pg_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tm_sch_pg_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "tm_sch_pri_pshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "tm_sch_pri_pshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "tm_sch_pri_cshap_sub_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "tm_sch_pri_cshap_sub_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "tm_sch_port_shap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "tm_sch_port_shap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "tm_sch_pri_pshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "tm_sch_rq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "tm_sch_rq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tm_sch_nq_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tm_sch_nq_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "tm_sch_roce_up_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "tm_sch_roce_up_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = 
"tm_sch_rcb_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "tm_sch_rcb_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(30), + .msg = "tm_sch_ssu_byte_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(31), + .msg = "tm_sch_ssu_byte_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { - { .int_msk = BIT(0), .msg = "qcn_shap_gp0_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "qcn_shap_gp0_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "qcn_shap_gp0_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "qcn_shap_gp1_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_shap_gp1_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "qcn_shap_gp2_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_shap_gp2_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "qcn_shap_gp3_sch_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_shap_gp3_sch_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "qcn_shap_gp0_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_shap_gp0_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "qcn_shap_gp1_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = 
"qcn_shap_gp1_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "qcn_shap_gp2_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_gp2_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "qcn_shap_gp3_offset_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_shap_gp3_offset_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "qcn_byte_info_fifo_rd_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_byte_info_fifo_wr_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { - { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(1), + .msg = "qcn_byte_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "qcn_time_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "qcn_fb_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "qcn_link_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qcn_rate_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "qcn_tmplt_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "qcn_shap_cfg_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "qcn_gp0_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "qcn_gp1_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "qcn_gp2_barrel_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "qcn_gp3_barral_mem_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { - { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err", - .reset_level = 
HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "cge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "lge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "egu_cge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "egu_lge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "egu_ge_afifo_underrun_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ge_igu_afifo_overflow_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "egu_cge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "egu_cge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "egu_lge_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "egu_lge_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "cge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(5), + .msg = "cge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "lge_igu_afifo_ecc_1bit_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(7), + .msg = "lge_igu_afifo_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "cge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "lge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "egu_cge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "egu_lge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "egu_ge_afifo_underrun_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ge_igu_afifo_overflow_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { - { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err", - .reset_level = 
HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(26), .msg = "rd_bus_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(27), .msg = "wr_bus_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(28), .msg = "reg_search_miss", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(29), .msg = "rx_q_search_miss", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(13), + .msg = "rpu_rx_pkt_bit32_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "rpu_rx_pkt_bit33_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "rpu_rx_pkt_bit34_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "rpu_rx_pkt_bit35_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "rcb_tx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "rcb_rx_ring_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "rcb_tx_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "rcb_rx_ebd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(21), + .msg = "rcb_tso_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "rcb_tx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "rcb_rx_int_info_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(24), + .msg = "tpu_tx_pkt_0_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(25), + .msg = "tpu_tx_pkt_1_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(26), + .msg = "rd_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(27), + .msg = "wr_bus_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(28), + .msg = "reg_search_miss", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(29), + .msg = "rx_q_search_miss", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(30), + .msg = "ooo_ecc_err_detect", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(31), + .msg = "ooo_ecc_err_multpl", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { - { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(4), + .msg = "gro_bd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "gro_context_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_stash_cfg_ecc_mbit_err", + .reset_level 
= HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "axi_rd_fbd_ecc_mbit_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { - { .int_msk = BIT(0), .msg = "over_8bd_no_fe", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(5), .msg = "buf_wait_timeout", - .reset_level = HNAE3_NONE_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "over_8bd_no_fe", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tso_mss_cmp_min_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "tso_mss_cmp_max_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(3), + .msg = "tx_rd_fbd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(4), + .msg = "rx_rd_ebd_poison", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(5), + .msg = "buf_wait_timeout", + .reset_level = HNAE3_NONE_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_com_err_int[] = { - { .int_msk = BIT(0), .msg = "buf_sum_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(1), .msg = "ppp_mb_num_err", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(2), .msg = "ppp_mbid_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ppp_rlt_host_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "cks_edit_position_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "cks_edit_condition_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "vlan_edit_condition_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "vlan_num_ot_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "vlan_num_in_err", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "buf_sum_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(1), + .msg = "ppp_mb_num_err", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(2), + .msg = "ppp_mbid_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ppp_rlt_mac_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ppp_rlt_host_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "cks_edit_position_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "cks_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "vlan_edit_condition_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "vlan_num_ot_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "vlan_num_in_err", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; #define HCLGE_SSU_MEM_ECC_ERR(x) \ - { .int_msk = BIT(x), .msg = "ssu_mem" #x "_ecc_mbit_err", \ - .reset_level = HNAE3_GLOBAL_RESET } +{ \ + .int_msk = BIT(x), \ + .msg = "ssu_mem" #x "_ecc_mbit_err", \ + .reset_level = HNAE3_GLOBAL_RESET \ +} static const struct 
hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { HCLGE_SSU_MEM_ECC_ERR(0), @@ -504,131 +931,269 @@ static const struct hclge_hw_error hclge_ssu_mem_ecc_err_int[] = { }; static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(1), + .msg = "tpu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "igu_pkt_without_key_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "roc_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "tpu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "igu_eof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "roc_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tpu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "igu_sof_mis_match_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "ets_rd_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "ets_wr_int_rx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "ets_rd_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "ets_wr_int_tx_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { - { .int_msk = BIT(0), .msg = "ig_mac_inf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ig_host_inf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ig_roc_buf_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(4), .msg = "ig_host_key_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int", - .reset_level = 
HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(9), .msg = "qm_eof_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(15), .msg = "host_cmd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "ig_mac_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ig_host_inf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ig_roc_buf_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ig_host_data_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(4), + .msg = "ig_host_key_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(5), + .msg = "tx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(6), + .msg = "rx_qcn_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(7), + .msg = "tx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(8), + .msg = "rx_pf_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(9), + .msg = "qm_eof_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(10), + .msg = "mb_rlt_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(11), + .msg = "dup_uncopy_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(12), + .msg = "dup_cnt_rd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(13), + .msg = "dup_cnt_drop_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(14), + .msg = "dup_cnt_wrb_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(15), + .msg = "host_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(16), + .msg = "mac_cmd_fifo_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(17), + .msg = "host_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(18), + .msg = "mac_cmd_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(19), + .msg = "dup_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(20), + .msg = "out_queue_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET 
+ }, { + .int_msk = BIT(21), + .msg = "bank2_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(22), + .msg = "bank1_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(23), + .msg = "bank0_bitmap_empty_int", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { - { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "ets_rd_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(1), + .msg = "ets_wr_int_rx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(2), + .msg = "ets_rd_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + .int_msk = BIT(3), + .msg = "ets_wr_int_tx_tcg", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { - { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port", - .reset_level = HNAE3_FUNC_RESET }, - { .int_msk = BIT(9), .msg = "low_water_line_err_port", - .reset_level = HNAE3_NONE_RESET }, - { .int_msk = BIT(10), .msg = "hi_water_line_err_port", - .reset_level = HNAE3_GLOBAL_RESET }, - { /* sentinel */ } + { + .int_msk = BIT(0), + .msg = "roc_pkt_without_key_port", + .reset_level = HNAE3_FUNC_RESET + }, { + .int_msk = BIT(9), + .msg = "low_water_line_err_port", + .reset_level = HNAE3_NONE_RESET + }, { + .int_msk = BIT(10), + .msg = "hi_water_line_err_port", + .reset_level = HNAE3_GLOBAL_RESET + }, { + /* sentinel */ + } }; static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { - { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, - { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, - { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, - { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" }, - { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" }, - { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" }, - { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" }, - { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" }, - { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" }, - { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" }, - { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" }, - { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" }, - { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" }, - { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" }, - { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" }, - { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" }, - { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" }, - { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" }, - { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" }, - { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" }, - { /* sentinel */ } + { + .int_msk = 0, + .msg = "rocee qmm ovf: sgid invalid err" + }, { + .int_msk = 0x4, + .msg = "rocee qmm ovf: sgid ovf err" + }, { + .int_msk = 0x8, + .msg = "rocee qmm ovf: smac invalid err" + }, { + .int_msk = 0xC, + .msg = "rocee qmm ovf: smac ovf err" + }, { + .int_msk = 0x10, + .msg = "rocee qmm ovf: cqc invalid err" + }, { + .int_msk = 
0x11, + .msg = "rocee qmm ovf: cqc ovf err" + }, { + .int_msk = 0x12, + .msg = "rocee qmm ovf: cqc hopnum err" + }, { + .int_msk = 0x13, + .msg = "rocee qmm ovf: cqc ba0 err" + }, { + .int_msk = 0x14, + .msg = "rocee qmm ovf: srqc invalid err" + }, { + .int_msk = 0x15, + .msg = "rocee qmm ovf: srqc ovf err" + }, { + .int_msk = 0x16, + .msg = "rocee qmm ovf: srqc hopnum err" + }, { + .int_msk = 0x17, + .msg = "rocee qmm ovf: srqc ba0 err" + }, { + .int_msk = 0x18, + .msg = "rocee qmm ovf: mpt invalid err" + }, { + .int_msk = 0x19, + .msg = "rocee qmm ovf: mpt ovf err" + }, { + .int_msk = 0x1A, + .msg = "rocee qmm ovf: mpt hopnum err" + }, { + .int_msk = 0x1B, + .msg = "rocee qmm ovf: mpt ba0 err" + }, { + .int_msk = 0x1C, + .msg = "rocee qmm ovf: qpc invalid err" + }, { + .int_msk = 0x1D, + .msg = "rocee qmm ovf: qpc ovf err" + }, { + .int_msk = 0x1E, + .msg = "rocee qmm ovf: qpc hopnum err" + }, { + .int_msk = 0x1F, + .msg = "rocee qmm ovf: qpc ba0 err" + }, { + /* sentinel */ + } }; static const struct hclge_hw_module_id hclge_hw_module_id_st[] = { @@ -1709,34 +2274,36 @@ static void hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) static const struct hclge_hw_blk hw_blk[] = { { - .msk = BIT(0), .name = "IGU_EGU", - .config_err_int = hclge_config_igu_egu_hw_err_int, - }, - { - .msk = BIT(1), .name = "PPP", - .config_err_int = hclge_config_ppp_hw_err_int, - }, - { - .msk = BIT(2), .name = "SSU", - .config_err_int = hclge_config_ssu_hw_err_int, - }, - { - .msk = BIT(3), .name = "PPU", - .config_err_int = hclge_config_ppu_hw_err_int, - }, - { - .msk = BIT(4), .name = "TM", - .config_err_int = hclge_config_tm_hw_err_int, - }, - { - .msk = BIT(5), .name = "COMMON", - .config_err_int = hclge_config_common_hw_err_int, - }, - { - .msk = BIT(8), .name = "MAC", - .config_err_int = hclge_config_mac_err_int, - }, - { /* sentinel */ } + .msk = BIT(0), + .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, { + .msk = BIT(1), + .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, { + .msk = BIT(2), + .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, { + .msk = BIT(3), + .name = "PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, { + .msk = BIT(4), + .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, { + .msk = BIT(5), + .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, { + .msk = BIT(8), + .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, { + /* sentinel */ + } }; static void hclge_config_all_msix_error(struct hclge_dev *hdev, bool enable) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ebeaf12e409b..e55ba2e511b1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -23,6 +23,7 @@ #include "hclge_tm.h" #include "hclge_err.h" #include "hnae3.h" +#include "hclge_devlink.h" #define HCLGE_NAME "hclge" #define HCLGE_STATS_READ(p, offset) (*(u64 *)((u8 *)(p) + (offset))) @@ -91,23 +92,23 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, - HCLGE_CMDQ_TX_ADDR_H_REG, - HCLGE_CMDQ_TX_DEPTH_REG, - HCLGE_CMDQ_TX_TAIL_REG, - HCLGE_CMDQ_TX_HEAD_REG, - HCLGE_CMDQ_RX_ADDR_L_REG, - HCLGE_CMDQ_RX_ADDR_H_REG, - HCLGE_CMDQ_RX_DEPTH_REG, - HCLGE_CMDQ_RX_TAIL_REG, - HCLGE_CMDQ_RX_HEAD_REG, +static const u32 
cmdq_reg_addr_list[] = {HCLGE_NIC_CSQ_BASEADDR_L_REG, + HCLGE_NIC_CSQ_BASEADDR_H_REG, + HCLGE_NIC_CSQ_DEPTH_REG, + HCLGE_NIC_CSQ_TAIL_REG, + HCLGE_NIC_CSQ_HEAD_REG, + HCLGE_NIC_CRQ_BASEADDR_L_REG, + HCLGE_NIC_CRQ_BASEADDR_H_REG, + HCLGE_NIC_CRQ_DEPTH_REG, + HCLGE_NIC_CRQ_TAIL_REG, + HCLGE_NIC_CRQ_HEAD_REG, HCLGE_VECTOR0_CMDQ_SRC_REG, HCLGE_CMDQ_INTR_STS_REG, HCLGE_CMDQ_INTR_EN_REG, HCLGE_CMDQ_INTR_GEN_REG}; static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, - HCLGE_VECTOR0_OTER_EN_REG, + HCLGE_PF_OTHER_INT_REG, HCLGE_MISC_RESET_STS_REG, HCLGE_MISC_VECTOR_INT_STS, HCLGE_GLOBAL_RESET_REG, @@ -374,14 +375,14 @@ static const enum hclge_opcode_type hclge_dfx_reg_opcode_list[] = { }; static const struct key_info meta_data_key_info[] = { - { PACKET_TYPE_ID, 6}, - { IP_FRAGEMENT, 1}, - { ROCE_TYPE, 1}, - { NEXT_KEY, 5}, - { VLAN_NUMBER, 2}, - { SRC_VPORT, 12}, - { DST_VPORT, 12}, - { TUNNEL_PACKET, 1}, + { PACKET_TYPE_ID, 6 }, + { IP_FRAGEMENT, 1 }, + { ROCE_TYPE, 1 }, + { NEXT_KEY, 5 }, + { VLAN_NUMBER, 2 }, + { SRC_VPORT, 12 }, + { DST_VPORT, 12 }, + { TUNNEL_PACKET, 1 }, }; static const struct key_info tuple_key_info[] = { @@ -748,9 +749,9 @@ static void hclge_update_stats(struct hnae3_handle *handle, static int hclge_get_sset_count(struct hnae3_handle *handle, int stringset) { -#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK |\ - HNAE3_SUPPORT_PHY_LOOPBACK |\ - HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK |\ +#define HCLGE_LOOPBACK_TEST_FLAGS (HNAE3_SUPPORT_APP_LOOPBACK | \ + HNAE3_SUPPORT_PHY_LOOPBACK | \ + HNAE3_SUPPORT_SERDES_SERIAL_LOOPBACK | \ HNAE3_SUPPORT_SERDES_PARALLEL_LOOPBACK) struct hclge_vport *vport = hclge_get_vport(handle); @@ -958,31 +959,31 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) static int hclge_parse_speed(u8 speed_cmd, u32 *speed) { switch (speed_cmd) { - case 6: + case HCLGE_FW_MAC_SPEED_10M: *speed = HCLGE_MAC_SPEED_10M; break; - case 7: + case HCLGE_FW_MAC_SPEED_100M: *speed = HCLGE_MAC_SPEED_100M; break; - case 0: + case HCLGE_FW_MAC_SPEED_1G: *speed = HCLGE_MAC_SPEED_1G; break; - case 1: + case HCLGE_FW_MAC_SPEED_10G: *speed = HCLGE_MAC_SPEED_10G; break; - case 2: + case HCLGE_FW_MAC_SPEED_25G: *speed = HCLGE_MAC_SPEED_25G; break; - case 3: + case HCLGE_FW_MAC_SPEED_40G: *speed = HCLGE_MAC_SPEED_40G; break; - case 4: + case HCLGE_FW_MAC_SPEED_50G: *speed = HCLGE_MAC_SPEED_50G; break; - case 5: + case HCLGE_FW_MAC_SPEED_100G: *speed = HCLGE_MAC_SPEED_100G; break; - case 8: + case HCLGE_FW_MAC_SPEED_200G: *speed = HCLGE_MAC_SPEED_200G; break; default: @@ -992,44 +993,43 @@ static int hclge_parse_speed(u8 speed_cmd, u32 *speed) return 0; } +static const struct hclge_speed_bit_map speed_bit_map[] = { + {HCLGE_MAC_SPEED_10M, HCLGE_SUPPORT_10M_BIT}, + {HCLGE_MAC_SPEED_100M, HCLGE_SUPPORT_100M_BIT}, + {HCLGE_MAC_SPEED_1G, HCLGE_SUPPORT_1G_BIT}, + {HCLGE_MAC_SPEED_10G, HCLGE_SUPPORT_10G_BIT}, + {HCLGE_MAC_SPEED_25G, HCLGE_SUPPORT_25G_BIT}, + {HCLGE_MAC_SPEED_40G, HCLGE_SUPPORT_40G_BIT}, + {HCLGE_MAC_SPEED_50G, HCLGE_SUPPORT_50G_BIT}, + {HCLGE_MAC_SPEED_100G, HCLGE_SUPPORT_100G_BIT}, + {HCLGE_MAC_SPEED_200G, HCLGE_SUPPORT_200G_BIT}, +}; + +static int hclge_get_speed_bit(u32 speed, u32 *speed_bit) +{ + u16 i; + + for (i = 0; i < ARRAY_SIZE(speed_bit_map); i++) { + if (speed == speed_bit_map[i].speed) { + *speed_bit = speed_bit_map[i].speed_bit; + return 0; + } + } + + return -EINVAL; +} + static int hclge_check_port_speed(struct hnae3_handle *handle, u32 speed) { struct hclge_vport *vport = hclge_get_vport(handle); struct 
hclge_dev *hdev = vport->back; u32 speed_ability = hdev->hw.mac.speed_ability; u32 speed_bit = 0; + int ret; - switch (speed) { - case HCLGE_MAC_SPEED_10M: - speed_bit = HCLGE_SUPPORT_10M_BIT; - break; - case HCLGE_MAC_SPEED_100M: - speed_bit = HCLGE_SUPPORT_100M_BIT; - break; - case HCLGE_MAC_SPEED_1G: - speed_bit = HCLGE_SUPPORT_1G_BIT; - break; - case HCLGE_MAC_SPEED_10G: - speed_bit = HCLGE_SUPPORT_10G_BIT; - break; - case HCLGE_MAC_SPEED_25G: - speed_bit = HCLGE_SUPPORT_25G_BIT; - break; - case HCLGE_MAC_SPEED_40G: - speed_bit = HCLGE_SUPPORT_40G_BIT; - break; - case HCLGE_MAC_SPEED_50G: - speed_bit = HCLGE_SUPPORT_50G_BIT; - break; - case HCLGE_MAC_SPEED_100G: - speed_bit = HCLGE_SUPPORT_100G_BIT; - break; - case HCLGE_MAC_SPEED_200G: - speed_bit = HCLGE_SUPPORT_200G_BIT; - break; - default: - return -EINVAL; - } + ret = hclge_get_speed_bit(speed, &speed_bit); + if (ret) + return ret; if (speed_bit & speed_ability) return 0; @@ -1550,6 +1550,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->tm_info.hw_pfc_map = 0; hdev->wanted_umv_size = cfg.umv_space; hdev->tx_spare_buf_size = cfg.tx_spare_buf_size; + hdev->gro_en = true; if (cfg.vlan_fliter_cap == HCLGE_VLAN_FLTR_CAN_MDF) set_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps); @@ -1618,7 +1619,7 @@ static int hclge_config_tso(struct hclge_dev *hdev, u16 tso_mss_min, return hclge_cmd_send(&hdev->hw, &desc, 1); } -static int hclge_config_gro(struct hclge_dev *hdev, bool en) +static int hclge_config_gro(struct hclge_dev *hdev) { struct hclge_cfg_gro_status_cmd *req; struct hclge_desc desc; @@ -1630,7 +1631,7 @@ static int hclge_config_gro(struct hclge_dev *hdev, bool en) hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); req = (struct hclge_cfg_gro_status_cmd *)desc.data; - req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 
1 : 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -1813,6 +1814,7 @@ static int hclge_vport_setup(struct hclge_vport *vport, u16 num_tqps) nic->pdev = hdev->pdev; nic->ae_algo = &ae_algo; nic->numa_node_mask = hdev->numa_node_mask; + nic->kinfo.io_base = hdev->hw.io_base; ret = hclge_knic_setup(vport, num_tqps, hdev->num_tx_desc, hdev->num_rx_desc); @@ -2579,39 +2581,39 @@ static int hclge_cfg_mac_speed_dup_hw(struct hclge_dev *hdev, int speed, switch (speed) { case HCLGE_MAC_SPEED_10M: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 6); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10M); break; case HCLGE_MAC_SPEED_100M: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 7); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100M); break; case HCLGE_MAC_SPEED_1G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 0); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_1G); break; case HCLGE_MAC_SPEED_10G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 1); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_10G); break; case HCLGE_MAC_SPEED_25G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 2); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_25G); break; case HCLGE_MAC_SPEED_40G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 3); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_40G); break; case HCLGE_MAC_SPEED_50G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 4); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_50G); break; case HCLGE_MAC_SPEED_100G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 5); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_100G); break; case HCLGE_MAC_SPEED_200G: hnae3_set_field(req->speed_dup, HCLGE_CFG_SPEED_M, - HCLGE_CFG_SPEED_S, 8); + HCLGE_CFG_SPEED_S, HCLGE_FW_MAC_SPEED_200G); break; default: dev_err(&hdev->pdev->dev, "invalid speed (%d)\n", speed); @@ -2952,12 +2954,12 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } if (state != hdev->hw.mac.link) { + hdev->hw.mac.link = state; client->ops->link_status_change(handle, state); hclge_config_mac_tnl_int(hdev, state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, state); - hdev->hw.mac.link = state; hclge_push_link_status(hdev); } @@ -3419,7 +3421,7 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) hclge_enable_vector(&hdev->misc_vector, false); event_cause = hclge_check_event_cause(hdev, &clearval); - /* vector 0 interrupt is shared with reset and mailbox source events.*/ + /* vector 0 interrupt is shared with reset and mailbox source events. 
*/ switch (event_cause) { case HCLGE_VECTOR0_EVENT_ERR: hclge_errhand_task_schedule(hdev); @@ -3788,6 +3790,12 @@ static void hclge_do_reset(struct hclge_dev *hdev) } switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + dev_info(&pdev->dev, "IMP reset requested\n"); + val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hnae3_set_bit(val, HCLGE_TRIGGER_IMP_RESET_B, 1); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, val); + break; case HNAE3_GLOBAL_RESET: dev_info(&pdev->dev, "global reset requested\n"); val = hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG); @@ -5936,7 +5944,7 @@ static int hclge_config_key(struct hclge_dev *hdev, u8 stage, cur_key_x = key_x; cur_key_y = key_y; - for (i = 0 ; i < MAX_TUPLE; i++) { + for (i = 0; i < MAX_TUPLE; i++) { bool tuple_valid; tuple_size = tuple_key_info[i].key_length / 8; @@ -10073,7 +10081,11 @@ static int hclge_init_vlan_config(struct hclge_dev *hdev) static void hclge_add_vport_vlan_table(struct hclge_vport *vport, u16 vlan_id, bool writen_to_tbl) { - struct hclge_vport_vlan_cfg *vlan; + struct hclge_vport_vlan_cfg *vlan, *tmp; + + list_for_each_entry_safe(vlan, tmp, &vport->vlan_list, node) + if (vlan->vlan_id == vlan_id) + return; vlan = kzalloc(sizeof(*vlan), GFP_KERNEL); if (!vlan) @@ -11443,6 +11455,28 @@ static void hclge_clear_resetting_state(struct hclge_dev *hdev) } } +static int hclge_clear_hw_resource(struct hclge_dev *hdev) +{ + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CLEAR_HW_RESOURCE, false); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + /* This new command is only supported by new firmware, it will + * fail with older firmware. Error value -EOPNOTSUPP can only be + * returned by older firmware running this command, to keep code + * backward compatible we will override this value and return + * success.
+ */ + if (ret && ret != -EOPNOTSUPP) { + dev_err(&hdev->pdev->dev, + "failed to clear hw resource, ret = %d\n", ret); + return ret; + } + return 0; +} + static void hclge_init_rxd_adv_layout(struct hclge_dev *hdev) { if (hnae3_ae_dev_rxd_adv_layout_supported(hdev->ae_dev)) @@ -11482,16 +11516,24 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto out; + ret = hclge_devlink_init(hdev); + if (ret) + goto err_pci_uninit; + /* Firmware command queue initialize */ ret = hclge_cmd_queue_init(hdev); if (ret) - goto err_pci_uninit; + goto err_devlink_uninit; /* Firmware command initialize */ ret = hclge_cmd_init(hdev); if (ret) goto err_cmd_uninit; + ret = hclge_clear_hw_resource(hdev); + if (ret) + goto err_cmd_uninit; + ret = hclge_get_cap(hdev); if (ret) goto err_cmd_uninit; @@ -11556,7 +11598,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } - ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) goto err_mdiobus_unreg; @@ -11658,6 +11700,8 @@ err_msi_uninit: pci_free_irq_vectors(pdev); err_cmd_uninit: hclge_cmd_uninit(hdev); +err_devlink_uninit: + hclge_devlink_uninit(hdev); err_pci_uninit: pcim_iounmap(pdev, hdev->hw.io_base); pci_clear_master(pdev); @@ -11937,7 +11981,7 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_config_gro(hdev, true); + ret = hclge_config_gro(hdev); if (ret) return ret; @@ -12048,6 +12092,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_cmd_uninit(hdev); hclge_misc_irq_uninit(hdev); + hclge_devlink_uninit(hdev); hclge_pci_uninit(hdev); mutex_destroy(&hdev->vport_lock); hclge_uninit_vport_vlan_table(hdev); @@ -12671,8 +12716,15 @@ static int hclge_gro_en(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; + bool gro_en_old = hdev->gro_en; + int ret; + + hdev->gro_en = enable; + ret = hclge_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; - return hclge_config_gro(hdev, enable); + return ret; } static void hclge_sync_promisc_mode(struct hclge_dev *hdev) @@ -12829,6 +12881,29 @@ static int hclge_get_module_eeprom(struct hnae3_handle *handle, u32 offset, return 0; } +static int hclge_get_link_diagnosis_info(struct hnae3_handle *handle, + u32 *status_code) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + struct hclge_desc desc; + int ret; + + if (hdev->ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2) + return -EOPNOTSUPP; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_LINK_DIAGNOSIS, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to query link diagnosis info, ret = %d\n", ret); + return ret; + } + + *status_code = le32_to_cpu(desc.data[0]); + return 0; +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, @@ -12929,6 +13004,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_tx_hwts_info = hclge_ptp_set_tx_info, .get_rx_hwts = hclge_ptp_get_rx_hwts, .get_ts_info = hclge_ptp_get_ts_info, + .get_link_diagnosis_info = hclge_get_link_diagnosis_info, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 3d3352491dba..de6afbcbfbac 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -8,6 +8,7 @@ #include <linux/phy.h> #include <linux/if_vlan.h> #include <linux/kfifo.h> +#include <net/devlink.h> #include "hclge_cmd.h" #include "hclge_ptp.h" @@ -37,22 +38,22 @@ #define HCLGE_VECTOR_REG_OFFSET_H 0x1000 #define HCLGE_VECTOR_VF_OFFSET 0x100000 -#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 -#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004 -#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008 -#define HCLGE_CMDQ_TX_TAIL_REG 0x27010 -#define HCLGE_CMDQ_TX_HEAD_REG 0x27014 -#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018 -#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C -#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020 -#define HCLGE_CMDQ_RX_TAIL_REG 0x27024 -#define HCLGE_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGE_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGE_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGE_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGE_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGE_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGE_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGE_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGE_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGE_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGE_NIC_CRQ_HEAD_REG 0x27028 + #define HCLGE_CMDQ_INTR_STS_REG 0x27104 #define HCLGE_CMDQ_INTR_EN_REG 0x27108 #define HCLGE_CMDQ_INTR_GEN_REG 0x2710C /* bar registers for common func */ -#define HCLGE_VECTOR0_OTER_EN_REG 0x20600 #define HCLGE_GRO_EN_REG 0x28000 #define HCLGE_RXD_ADV_LAYOUT_EN_REG 0x28008 @@ -193,6 +194,7 @@ enum HLCGE_PORT_TYPE { #define HCLGE_VECTOR0_IMP_CMDQ_ERR_B 4U #define HCLGE_VECTOR0_IMP_RD_POISON_B 5U #define HCLGE_VECTOR0_ALL_MSIX_ERR_B 6U +#define HCLGE_TRIGGER_IMP_RESET_B 7U #define HCLGE_MAC_DEFAULT_FRAME \ (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) @@ -927,6 +929,7 @@ struct hclge_dev { unsigned long fd_bmap[BITS_TO_LONGS(MAX_FD_FILTER_NUM)]; enum HCLGE_FD_ACTIVE_RULE_TYPE fd_active_type; u8 fd_en; + bool gro_en; u16 wanted_umv_size; /* max available unicast mac vlan space */ @@ -943,6 +946,7 @@ struct hclge_dev { cpumask_t affinity_mask; struct irq_affinity_notify affinity_notify; struct hclge_ptp *ptp; + struct devlink *devlink; }; /* VPort level vlan tag configuration for TX direction */ @@ -1054,6 +1058,11 @@ struct hclge_vport { struct list_head vlan_list; /* Store VF vlan table */ }; +struct hclge_speed_bit_map { + u32 speed; + u32 speed_bit; +}; + int hclge_set_vport_promisc_mode(struct hclge_vport *vport, bool en_uc_pmc, bool en_mc_pmc, bool en_bc_pmc); int hclge_add_uc_addr_common(struct hclge_vport *vport, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index c0a478ae9583..2ce5302c5956 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -10,7 +10,14 @@ static u16 hclge_errno_to_resp(int errno) { - return abs(errno); + int resp = abs(errno); + + /* The status for pf to vf msg cmd is u16, constrained by HW. + * We need to keep the same type as it. + * The input errno is the standard error code, it's safe to + * use a u16 to store the abs(errno).
+ */ + return (u16)resp; } /* hclge_gen_resp_to_vf: used to generate a synchronous response to VF when PF @@ -66,6 +73,8 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, memcpy(resp_pf_to_vf->msg.resp_data, resp_msg->data, resp_msg->len); + trace_hclge_pf_mbx_send(hdev, resp_pf_to_vf); + status = hclge_cmd_send(&hdev->hw, &desc, 1); if (status) dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h index dbf5f4c08019..7a9b77de632a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_ptp.h @@ -127,7 +127,7 @@ static inline struct hclge_dev *hclge_ptp_get_hdev(struct ptp_clock_info *info) } bool hclge_ptp_set_tx_info(struct hnae3_handle *handle, struct sk_buff *skb); -void hclge_ptp_clean_tx_hwts(struct hclge_dev *dev); +void hclge_ptp_clean_tx_hwts(struct hclge_dev *hdev); void hclge_ptp_get_rx_hwts(struct hnae3_handle *handle, struct sk_buff *skb, u32 nsec, u32 sec); int hclge_ptp_get_cfg(struct hclge_dev *hdev, struct ifreq *ifr); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile index 2c26ea607a53..51ff7d86ee90 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/Makefile @@ -7,4 +7,4 @@ ccflags-y := -I $(srctree)/drivers/net/ethernet/hisilicon/hns3 ccflags-y += -I $(srctree)/$(src) obj-$(CONFIG_HNS3_HCLGEVF) += hclgevf.o -hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o +hclgevf-objs = hclgevf_main.o hclgevf_cmd.o hclgevf_mbx.o hclgevf_devlink.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index bd19a2d89f6c..59772b0e9531 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -71,7 +71,7 @@ static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw) static bool hclgevf_is_special_opcode(u16 opcode) { - static const u16 spec_opcode[] = {0x30, 0x31, 0x32}; + const u16 spec_opcode[] = {0x30, 0x31, 0x32}; int i; for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) { @@ -342,25 +342,26 @@ static void hclgevf_set_default_capability(struct hclgevf_dev *hdev) set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps); } +const struct hclgevf_caps_bit_map hclgevf_cmd_caps_bit_map0[] = { + {HCLGEVF_CAP_UDP_GSO_B, HNAE3_DEV_SUPPORT_UDP_GSO_B}, + {HCLGEVF_CAP_INT_QL_B, HNAE3_DEV_SUPPORT_INT_QL_B}, + {HCLGEVF_CAP_TQP_TXRX_INDEP_B, HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B}, + {HCLGEVF_CAP_HW_TX_CSUM_B, HNAE3_DEV_SUPPORT_HW_TX_CSUM_B}, + {HCLGEVF_CAP_UDP_TUNNEL_CSUM_B, HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B}, + {HCLGEVF_CAP_RXD_ADV_LAYOUT_B, HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B}, +}; + static void hclgevf_parse_capability(struct hclgevf_dev *hdev, struct hclgevf_query_version_cmd *cmd) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - u32 caps; + u32 caps, i; caps = __le32_to_cpu(cmd->caps[0]); - if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B)) - set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B)) - set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, 
HCLGEVF_CAP_UDP_TUNNEL_CSUM_B)) - set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps); - if (hnae3_get_bit(caps, HCLGEVF_CAP_RXD_ADV_LAYOUT_B)) - set_bit(HNAE3_DEV_SUPPORT_RXD_ADV_LAYOUT_B, ae_dev->caps); + for (i = 0; i < ARRAY_SIZE(hclgevf_cmd_caps_bit_map0); i++) + if (hnae3_get_bit(caps, hclgevf_cmd_caps_bit_map0[i].imp_bit)) + set_bit(hclgevf_cmd_caps_bit_map0[i].local_bit, + ae_dev->caps); } static __le32 hclgevf_build_api_caps(void) @@ -507,12 +508,17 @@ static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw) void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + /* wait to ensure that the firmware completes the possible left + * over commands. + */ + msleep(HCLGEVF_CMDQ_CLEAR_WAIT_TIME); spin_lock_bh(&hdev->hw.cmq.csq.lock); spin_lock(&hdev->hw.cmq.crq.lock); - set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); hclgevf_cmd_uninit_regs(&hdev->hw); spin_unlock(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index 202feb70dba5..39d0b589c720 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -8,6 +8,7 @@ #include "hnae3.h" #define HCLGEVF_CMDQ_TX_TIMEOUT 30000 +#define HCLGEVF_CMDQ_CLEAR_WAIT_TIME 200 #define HCLGEVF_CMDQ_RX_INVLD_B 0 #define HCLGEVF_CMDQ_RX_OUTVLD_B 1 @@ -265,16 +266,6 @@ struct hclgevf_cfg_tx_queue_pointer_cmd { #define HCLGEVF_TYPE_CRQ 0 #define HCLGEVF_TYPE_CSQ 1 -#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 -#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 -#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 -#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 -#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 -#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 -#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701c -#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 -#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 -#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 /* this bit indicates that the driver is ready for hardware reset */ #define HCLGEVF_NIC_SW_RST_RDY_B 16 @@ -305,6 +296,12 @@ struct hclgevf_dev_specs_1_cmd { u8 rsv1[18]; }; +/* capabilities bits map between imp firmware and local driver */ +struct hclgevf_caps_bit_map { + u16 imp_bit; + u16 local_bit; +}; + static inline void hclgevf_write_reg(void __iomem *base, u32 reg, u32 value) { writel(value, base + reg); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c new file mode 100644 index 000000000000..f478770299c6 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2021 Hisilicon Limited. 
*/ + +#include <net/devlink.h> + +#include "hclgevf_devlink.h" + +static int hclgevf_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ +#define HCLGEVF_DEVLINK_FW_STRING_LEN 32 + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + char version_str[HCLGEVF_DEVLINK_FW_STRING_LEN]; + struct hclgevf_dev *hdev = priv->hdev; + int ret; + + ret = devlink_info_driver_name_put(req, KBUILD_MODNAME); + if (ret) + return ret; + + snprintf(version_str, sizeof(version_str), "%lu.%lu.%lu.%lu", + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK, + HNAE3_FW_VERSION_BYTE3_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK, + HNAE3_FW_VERSION_BYTE2_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK, + HNAE3_FW_VERSION_BYTE1_SHIFT), + hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK, + HNAE3_FW_VERSION_BYTE0_SHIFT)); + + return devlink_info_version_running_put(req, + DEVLINK_INFO_VERSION_GENERIC_FW, + version_str); +} + +static int hclgevf_devlink_reload_down(struct devlink *devlink, + bool netns_change, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + struct netlink_ext_ack *extack) +{ + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + struct hclgevf_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->nic; + struct pci_dev *pdev = hdev->pdev; + int ret; + + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) { + dev_err(&pdev->dev, "reset is handling\n"); + return -EBUSY; + } + + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_DOWN_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, + HNAE3_UNINIT_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static int hclgevf_devlink_reload_up(struct devlink *devlink, + enum devlink_reload_action action, + enum devlink_reload_limit limit, + u32 *actions_performed, + struct netlink_ext_ack *extack) +{ + struct hclgevf_devlink_priv *priv = devlink_priv(devlink); + struct hclgevf_dev *hdev = priv->hdev; + struct hnae3_handle *h = &hdev->nic; + int ret; + + *actions_performed = BIT(action); + switch (action) { + case DEVLINK_RELOAD_ACTION_DRIVER_REINIT: + rtnl_lock(); + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_INIT_CLIENT); + if (ret) { + rtnl_unlock(); + return ret; + } + + ret = hdev->nic_client->ops->reset_notify(h, HNAE3_UP_CLIENT); + rtnl_unlock(); + return ret; + default: + return -EOPNOTSUPP; + } +} + +static const struct devlink_ops hclgevf_devlink_ops = { + .info_get = hclgevf_devlink_info_get, + .reload_actions = BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT), + .reload_down = hclgevf_devlink_reload_down, + .reload_up = hclgevf_devlink_reload_up, +}; + +int hclgevf_devlink_init(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + struct hclgevf_devlink_priv *priv; + struct devlink *devlink; + int ret; + + devlink = + devlink_alloc(&hclgevf_devlink_ops, + sizeof(struct hclgevf_devlink_priv), &pdev->dev); + if (!devlink) + return -ENOMEM; + + priv = devlink_priv(devlink); + priv->hdev = hdev; + hdev->devlink = devlink; + + ret = devlink_register(devlink); + if (ret) { + dev_err(&pdev->dev, "failed to register devlink, ret = %d\n", + ret); + goto out_reg_fail; + } + + devlink_reload_enable(devlink); + + return 0; + +out_reg_fail: + devlink_free(devlink); + return ret; +} + +void 
hclgevf_devlink_uninit(struct hclgevf_dev *hdev) +{ + struct devlink *devlink = hdev->devlink; + + devlink_reload_disable(devlink); + + devlink_unregister(devlink); + + devlink_free(devlink); +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h new file mode 100644 index 000000000000..e09ea3d8a963 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_devlink.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2021 Hisilicon Limited. */ + +#ifndef __HCLGEVF_DEVLINK_H +#define __HCLGEVF_DEVLINK_H + +#include "hclgevf_main.h" + +struct hclgevf_devlink_priv { + struct hclgevf_dev *hdev; +}; + +int hclgevf_devlink_init(struct hclgevf_dev *hdev); +void hclgevf_devlink_uninit(struct hclgevf_dev *hdev); +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 8784d61e833f..82e727020120 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -8,6 +8,7 @@ #include "hclgevf_main.h" #include "hclge_mbx.h" #include "hnae3.h" +#include "hclgevf_devlink.h" #define HCLGEVF_NAME "hclgevf" @@ -39,16 +40,16 @@ static const u8 hclgevf_hash_key[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); -static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, - HCLGEVF_CMDQ_TX_ADDR_H_REG, - HCLGEVF_CMDQ_TX_DEPTH_REG, - HCLGEVF_CMDQ_TX_TAIL_REG, - HCLGEVF_CMDQ_TX_HEAD_REG, - HCLGEVF_CMDQ_RX_ADDR_L_REG, - HCLGEVF_CMDQ_RX_ADDR_H_REG, - HCLGEVF_CMDQ_RX_DEPTH_REG, - HCLGEVF_CMDQ_RX_TAIL_REG, - HCLGEVF_CMDQ_RX_HEAD_REG, +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_NIC_CSQ_BASEADDR_L_REG, + HCLGEVF_NIC_CSQ_BASEADDR_H_REG, + HCLGEVF_NIC_CSQ_DEPTH_REG, + HCLGEVF_NIC_CSQ_TAIL_REG, + HCLGEVF_NIC_CSQ_HEAD_REG, + HCLGEVF_NIC_CRQ_BASEADDR_L_REG, + HCLGEVF_NIC_CRQ_BASEADDR_H_REG, + HCLGEVF_NIC_CRQ_DEPTH_REG, + HCLGEVF_NIC_CRQ_TAIL_REG, + HCLGEVF_NIC_CRQ_HEAD_REG, HCLGEVF_VECTOR0_CMDQ_SRC_REG, HCLGEVF_VECTOR0_CMDQ_STATE_REG, HCLGEVF_CMDQ_INTR_EN_REG, @@ -506,10 +507,10 @@ void hclgevf_update_link_status(struct hclgevf_dev *hdev, int link_state) link_state = test_bit(HCLGEVF_STATE_DOWN, &hdev->state) ? 
0 : link_state; if (link_state != hdev->hw.mac.link) { + hdev->hw.mac.link = link_state; client->ops->link_status_change(handle, !!link_state); if (rclient && rclient->ops->link_status_change) rclient->ops->link_status_change(rhandle, !!link_state); - hdev->hw.mac.link = link_state; } clear_bit(HCLGEVF_STATE_LINK_UPDATING, &hdev->state); @@ -538,6 +539,7 @@ static int hclgevf_set_handle_info(struct hclgevf_dev *hdev) nic->pdev = hdev->pdev; nic->numa_node_mask = hdev->numa_node_mask; nic->flags |= HNAE3_SUPPORT_VF; + nic->kinfo.io_base = hdev->hw.io_base; ret = hclgevf_knic_setup(hdev); if (ret) @@ -1961,7 +1963,7 @@ static void hclgevf_dump_rst_info(struct hclgevf_dev *hdev) dev_info(&hdev->pdev->dev, "vector0 interrupt status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_STATE_REG)); dev_info(&hdev->pdev->dev, "handshake status: 0x%x\n", - hclgevf_read_dev(&hdev->hw, HCLGEVF_CMDQ_TX_DEPTH_REG)); + hclgevf_read_dev(&hdev->hw, HCLGEVF_NIC_CSQ_DEPTH_REG)); dev_info(&hdev->pdev->dev, "function reset status: 0x%x\n", hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING)); dev_info(&hdev->pdev->dev, "hdev state: 0x%lx\n", hdev->state); @@ -2487,6 +2489,8 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) { int ret; + hdev->gro_en = true; + ret = hclgevf_get_basic_info(hdev); if (ret) return ret; @@ -2549,7 +2553,7 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) return 0; } -static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +static int hclgevf_config_gro(struct hclgevf_dev *hdev) { struct hclgevf_cfg_gro_status_cmd *req; struct hclgevf_desc desc; @@ -2562,7 +2566,7 @@ static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) false); req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; - req->gro_en = en ? 1 : 0; + req->gro_en = hdev->gro_en ? 
1 : 0; ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (ret) @@ -3308,7 +3312,7 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) return ret; } - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) return ret; @@ -3337,6 +3341,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) return ret; + ret = hclgevf_devlink_init(hdev); + if (ret) + goto err_devlink_init; + ret = hclgevf_cmd_queue_init(hdev); if (ret) goto err_cmd_queue_init; @@ -3389,7 +3397,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) goto err_config; - ret = hclgevf_config_gro(hdev, true); + ret = hclgevf_config_gro(hdev); if (ret) goto err_config; @@ -3441,6 +3449,8 @@ err_misc_irq_init: err_cmd_init: hclgevf_cmd_uninit(hdev); err_cmd_queue_init: + hclgevf_devlink_uninit(hdev); +err_devlink_init: hclgevf_pci_uninit(hdev); clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; @@ -3462,6 +3472,7 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) } hclgevf_cmd_uninit(hdev); + hclgevf_devlink_uninit(hdev); hclgevf_pci_uninit(hdev); hclgevf_uninit_mac_list(hdev); } @@ -3638,8 +3649,15 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, static int hclgevf_gro_en(struct hnae3_handle *handle, bool enable) { struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + bool gro_en_old = hdev->gro_en; + int ret; + + hdev->gro_en = enable; + ret = hclgevf_config_gro(hdev); + if (ret) + hdev->gro_en = gro_en_old; - return hclgevf_config_gro(hdev, enable); + return ret; } static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index d7d02848d674..883130a9b48f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -6,6 +6,7 @@ #include <linux/fs.h> #include <linux/if_vlan.h> #include <linux/types.h> +#include <net/devlink.h> #include "hclge_mbx.h" #include "hclgevf_cmd.h" #include "hnae3.h" @@ -32,16 +33,17 @@ #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 /* bar registers for cmdq */ -#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000 -#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004 -#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008 -#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010 -#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014 -#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018 -#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C -#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 -#define HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 -#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGEVF_NIC_CSQ_BASEADDR_L_REG 0x27000 +#define HCLGEVF_NIC_CSQ_BASEADDR_H_REG 0x27004 +#define HCLGEVF_NIC_CSQ_DEPTH_REG 0x27008 +#define HCLGEVF_NIC_CSQ_TAIL_REG 0x27010 +#define HCLGEVF_NIC_CSQ_HEAD_REG 0x27014 +#define HCLGEVF_NIC_CRQ_BASEADDR_L_REG 0x27018 +#define HCLGEVF_NIC_CRQ_BASEADDR_H_REG 0x2701C +#define HCLGEVF_NIC_CRQ_DEPTH_REG 0x27020 +#define HCLGEVF_NIC_CRQ_TAIL_REG 0x27024 +#define HCLGEVF_NIC_CRQ_HEAD_REG 0x27028 + #define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 #define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C @@ -310,11 +312,12 @@ struct hclgevf_dev { u16 *vector_status; int *vector_irq; + bool gro_en; + unsigned long vlan_del_fail_bmap[BITS_TO_LONGS(VLAN_N_VID)]; struct hclgevf_mac_table_cfg mac_table; - bool mbx_event_pending; struct hclgevf_mbx_resp_status mbx_resp; /* mailbox response */ struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ @@ -330,6 +333,8 @@ struct 
hclgevf_dev { u32 flag; unsigned long serv_processed_cnt; unsigned long last_serv_processed; + + struct devlink *devlink; }; static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 772b2f8acd2e..fdc66fae0960 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -155,18 +155,66 @@ static bool hclgevf_cmd_crq_empty(struct hclgevf_hw *hw) return tail == hw->cmq.crq.next_to_use; } +static void hclgevf_handle_mbx_response(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + struct hclgevf_mbx_resp_status *resp = &hdev->mbx_resp; + + if (resp->received_resp) + dev_warn(&hdev->pdev->dev, + "VF mbx resp flag not clear(%u)\n", + req->msg.vf_mbx_msg_code); + + resp->origin_mbx_msg = + (req->msg.vf_mbx_msg_code << 16); + resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; + resp->resp_status = + hclgevf_resp_to_errno(req->msg.resp_status); + memcpy(resp->additional_info, req->msg.resp_data, + HCLGE_MBX_MAX_RESP_DATA_SIZE * sizeof(u8)); + if (req->match_id) { + /* If match_id is not zero, it means PF support match_id. + * if the match_id is right, VF get the right response, or + * ignore the response. and driver will clear hdev->mbx_resp + * when send next message which need response. + */ + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } +} + +static void hclgevf_handle_mbx_msg(struct hclgevf_dev *hdev, + struct hclge_mbx_pf_to_vf_cmd *req) +{ + /* we will drop the async msg if we find ARQ as full + * and continue with next message + */ + if (atomic_read(&hdev->arq.count) >= + HCLGE_MBX_MAX_ARQ_MSG_NUM) { + dev_warn(&hdev->pdev->dev, + "Async Q full, dropping msg(%u)\n", + req->msg.code); + return; + } + + /* tail the async message in arq */ + memcpy(hdev->arq.msg_q[hdev->arq.tail], &req->msg, + HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); + hclge_mbx_tail_ptr_move_arq(hdev->arq); + atomic_inc(&hdev->arq.count); + + hclgevf_mbx_task_schedule(hdev); +} + void hclgevf_mbx_handler(struct hclgevf_dev *hdev) { - struct hclgevf_mbx_resp_status *resp; struct hclge_mbx_pf_to_vf_cmd *req; struct hclgevf_cmq_ring *crq; struct hclgevf_desc *desc; - u16 *msg_q; u16 flag; - u8 *temp; - int i; - resp = &hdev->mbx_resp; crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { @@ -200,69 +248,14 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) */ switch (req->msg.code) { case HCLGE_MBX_PF_VF_RESP: - if (resp->received_resp) - dev_warn(&hdev->pdev->dev, - "VF mbx resp flag not clear(%u)\n", - req->msg.vf_mbx_msg_code); - resp->received_resp = true; - - resp->origin_mbx_msg = - (req->msg.vf_mbx_msg_code << 16); - resp->origin_mbx_msg |= req->msg.vf_mbx_msg_subcode; - resp->resp_status = - hclgevf_resp_to_errno(req->msg.resp_status); - - temp = (u8 *)req->msg.resp_data; - for (i = 0; i < HCLGE_MBX_MAX_RESP_DATA_SIZE; i++) { - resp->additional_info[i] = *temp; - temp++; - } - - /* If match_id is not zero, it means PF support - * match_id. If the match_id is right, VF get the - * right response, otherwise ignore the response. - * Driver will clear hdev->mbx_resp when send - * next message which need response. 
- */ - if (req->match_id) { - if (req->match_id == resp->match_id) - resp->received_resp = true; - } else { - resp->received_resp = true; - } + hclgevf_handle_mbx_response(hdev, req); break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: case HCLGE_MBX_LINK_STAT_MODE: case HCLGE_MBX_PUSH_VLAN_INFO: case HCLGE_MBX_PUSH_PROMISC_INFO: - /* set this mbx event as pending. This is required as we - * might loose interrupt event when mbx task is busy - * handling. This shall be cleared when mbx task just - * enters handling state. - */ - hdev->mbx_event_pending = true; - - /* we will drop the async msg if we find ARQ as full - * and continue with next message - */ - if (atomic_read(&hdev->arq.count) >= - HCLGE_MBX_MAX_ARQ_MSG_NUM) { - dev_warn(&hdev->pdev->dev, - "Async Q full, dropping msg(%u)\n", - req->msg.code); - break; - } - - /* tail the async message in arq */ - msg_q = hdev->arq.msg_q[hdev->arq.tail]; - memcpy(&msg_q[0], &req->msg, - HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16)); - hclge_mbx_tail_ptr_move_arq(hdev->arq); - atomic_inc(&hdev->arq.count); - - hclgevf_mbx_task_schedule(hdev); - + hclgevf_handle_mbx_msg(hdev, req); break; default: dev_err(&hdev->pdev->dev, @@ -298,11 +291,6 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) u8 flag; u8 idx; - /* we can safely clear it now as we are at start of the async message - * processing - */ - hdev->mbx_event_pending = false; - tail = hdev->arq.tail; /* process all the async queue messages */ @@ -323,8 +311,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) flag = (u8)msg_q[5]; /* update upper layer with new link link status */ - hclgevf_update_link_status(hdev, link_status); hclgevf_update_speed_duplex(hdev, speed, duplex); + hclgevf_update_link_status(hdev, link_status); if (flag & HCLGE_MBX_PUSH_LINK_STATUS_EN) set_bit(HCLGEVF_STATE_PF_PUSH_LINK_STATUS, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c index 58d5646444b0..6e11ee339f12 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.c @@ -293,9 +293,9 @@ static const struct devlink_ops hinic_devlink_ops = { .flash_update = hinic_devlink_flash_update, }; -struct devlink *hinic_devlink_alloc(void) +struct devlink *hinic_devlink_alloc(struct device *dev) { - return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev)); + return devlink_alloc(&hinic_devlink_ops, sizeof(struct hinic_dev), dev); } void hinic_devlink_free(struct devlink *devlink) @@ -303,11 +303,11 @@ void hinic_devlink_free(struct devlink *devlink) devlink_free(devlink); } -int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev) +int hinic_devlink_register(struct hinic_devlink_priv *priv) { struct devlink *devlink = priv_to_devlink(priv); - return devlink_register(devlink, dev); + return devlink_register(devlink); } void hinic_devlink_unregister(struct hinic_devlink_priv *priv) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h index a090ebcfaabb..9e315011015c 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_devlink.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_devlink.h @@ -108,9 +108,9 @@ struct host_image_st { u32 device_id; }; -struct devlink *hinic_devlink_alloc(void); +struct devlink *hinic_devlink_alloc(struct device *dev); void hinic_devlink_free(struct devlink *devlink); -int hinic_devlink_register(struct hinic_devlink_priv *priv, struct device *dev); 
+int hinic_devlink_register(struct hinic_devlink_priv *priv); void hinic_devlink_unregister(struct hinic_devlink_priv *priv); int hinic_health_reporters_create(struct hinic_devlink_priv *priv); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 162d3c330dec..b431c300ef1b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -795,13 +795,17 @@ static int __hinic_set_coalesce(struct net_device *netdev, } static int hinic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __hinic_get_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } static int hinic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __hinic_set_coalesce(netdev, coal, COALESCE_ALL_QUEUE); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 428108eb10d2..56b6b04e209b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -754,7 +754,7 @@ static int init_pfhwdev(struct hinic_pfhwdev *pfhwdev) return err; } - err = hinic_devlink_register(hwdev->devlink_dev, &pdev->dev); + err = hinic_devlink_register(hwdev->devlink_dev); if (err) { dev_err(&hwif->pdev->dev, "Failed to register devlink\n"); hinic_pf_to_mgmt_free(&pfhwdev->pf_to_mgmt); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 405ee4d2d2b1..ae707e305684 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1183,7 +1183,7 @@ static int nic_dev_init(struct pci_dev *pdev) struct devlink *devlink; int err, num_qps; - devlink = hinic_devlink_alloc(); + devlink = hinic_devlink_alloc(&pdev->dev); if (!devlink) { dev_err(&pdev->dev, "Hinic devlink alloc failed\n"); return -ENOMEM; @@ -1392,28 +1392,16 @@ static int hinic_probe(struct pci_dev *pdev, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Couldn't set 64-bit DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Failed to set DMA mask\n"); goto err_dma_mask; } } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, - "Couldn't set 64-bit consistent DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "Failed to set consistent DMA mask\n"); - goto err_dma_consistent_mask; - } - } - err = nic_dev_init(pdev); if (err) { dev_err(&pdev->dev, "Failed to initialize NIC device\n"); @@ -1424,7 +1412,6 @@ static int hinic_probe(struct pci_dev *pdev, return 0; err_nic_dev_init: -err_dma_consistent_mask: err_dma_mask: pci_release_regions(pdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index f8a26459ff65..a78c398bf5b2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -836,8 +836,10 @@ int 
hinic_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) int hinic_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, int max_tx_rate) { - u32 speeds[] = {SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, - SPEED_25000, SPEED_40000, SPEED_100000}; + static const u32 speeds[] = { + SPEED_10, SPEED_100, SPEED_1000, SPEED_10000, + SPEED_25000, SPEED_40000, SPEED_100000 + }; struct hinic_dev *nic_dev = netdev_priv(netdev); struct hinic_port_cap port_cap = { 0 }; enum hinic_port_link_state link_state; diff --git a/drivers/net/ethernet/i825xx/82596.c b/drivers/net/ethernet/i825xx/82596.c index fc8c7cd67471..b8a40146b895 100644 --- a/drivers/net/ethernet/i825xx/82596.c +++ b/drivers/net/ethernet/i825xx/82596.c @@ -1110,9 +1110,6 @@ static void print_eth(unsigned char *add, char *str) add, add + 6, add, add[12], add[13], str); } -static int io = 0x300; -static int irq = 10; - static const struct net_device_ops i596_netdev_ops = { .ndo_open = i596_open, .ndo_stop = i596_close, @@ -1123,7 +1120,7 @@ static const struct net_device_ops i596_netdev_ops = { .ndo_validate_addr = eth_validate_addr, }; -struct net_device * __init i82596_probe(int unit) +static struct net_device * __init i82596_probe(void) { struct net_device *dev; int i; @@ -1140,14 +1137,6 @@ struct net_device * __init i82596_probe(int unit) if (!dev) return ERR_PTR(-ENOMEM); - if (unit >= 0) { - sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } else { - dev->base_addr = io; - dev->irq = irq; - } - #ifdef ENABLE_MVME16x_NET if (MACH_IS_MVME16x) { if (mvme16x_config & MVME16x_CONFIG_NO_ETHERNET) { @@ -1515,22 +1504,22 @@ static void set_multicast_list(struct net_device *dev) } } -#ifdef MODULE static struct net_device *dev_82596; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "i82596 debug mask"); -int __init init_module(void) +static int __init i82596_init(void) { if (debug >= 0) i596_debug = debug; - dev_82596 = i82596_probe(-1); + dev_82596 = i82596_probe(); return PTR_ERR_OR_ZERO(dev_82596); } +module_init(i82596_init); -void __exit cleanup_module(void) +static void __exit i82596_cleanup(void) { unregister_netdev(dev_82596); #ifdef __mc68000__ @@ -1544,5 +1533,4 @@ void __exit cleanup_module(void) free_page ((u32)(dev_82596->mem_start)); free_netdev(dev_82596); } - -#endif /* MODULE */ +module_exit(i82596_cleanup); diff --git a/drivers/net/ethernet/i825xx/sun3_82586.c b/drivers/net/ethernet/i825xx/sun3_82586.c index 4564ee02c95f..893e0ddcb611 100644 --- a/drivers/net/ethernet/i825xx/sun3_82586.c +++ b/drivers/net/ethernet/i825xx/sun3_82586.c @@ -29,6 +29,7 @@ static int rfdadd = 0; /* rfdadd=1 may be better for 8K MEM cards */ static int fifo=0x8; /* don't change */ #include <linux/kernel.h> +#include <linux/module.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ioport.h> @@ -276,7 +277,7 @@ static void alloc586(struct net_device *dev) memset((char *)p->scb,0,sizeof(struct scb_struct)); } -struct net_device * __init sun3_82586_probe(int unit) +static int __init sun3_82586_probe(void) { struct net_device *dev; unsigned long ioaddr; @@ -291,25 +292,20 @@ struct net_device * __init sun3_82586_probe(int unit) break; default: - return ERR_PTR(-ENODEV); + return -ENODEV; } if (found) - return ERR_PTR(-ENODEV); + return -ENODEV; ioaddr = (unsigned long)ioremap(IE_OBIO, SUN3_82586_TOTAL_SIZE); if (!ioaddr) - return ERR_PTR(-ENOMEM); + return -ENOMEM; found = 1; dev = alloc_etherdev(sizeof(struct priv)); if (!dev) goto out; - if (unit >= 0) { - 
sprintf(dev->name, "eth%d", unit); - netdev_boot_setup_check(dev); - } - dev->irq = IE_IRQ; dev->base_addr = ioaddr; err = sun3_82586_probe1(dev, ioaddr); @@ -326,8 +322,9 @@ out1: free_netdev(dev); out: iounmap((void __iomem *)ioaddr); - return ERR_PTR(err); + return err; } +module_init(sun3_82586_probe); static const struct net_device_ops sun3_82586_netdev_ops = { .ndo_open = sun3_82586_open, diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 471be6ec7e8a..664a91af662d 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -3011,7 +3011,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_stop = emac_close, .ndo_get_stats = emac_stats, .ndo_set_rx_mode = emac_set_multicast_list, - .ndo_do_ioctl = emac_ioctl, + .ndo_eth_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = emac_set_mac_address, @@ -3023,7 +3023,7 @@ static const struct net_device_ops emac_gige_netdev_ops = { .ndo_stop = emac_close, .ndo_get_stats = emac_stats, .ndo_set_rx_mode = emac_set_multicast_list, - .ndo_do_ioctl = emac_ioctl, + .ndo_eth_ioctl = emac_ioctl, .ndo_tx_timeout = emac_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = emac_set_mac_address, diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 737ba85e409f..3d9b4f99d357 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1630,7 +1630,7 @@ static const struct net_device_ops ibmveth_netdev_ops = { .ndo_stop = ibmveth_close, .ndo_start_xmit = ibmveth_start_xmit, .ndo_set_rx_mode = ibmveth_set_multicast_list, - .ndo_do_ioctl = ibmveth_ioctl, + .ndo_eth_ioctl = ibmveth_ioctl, .ndo_change_mtu = ibmveth_change_mtu, .ndo_fix_features = ibmveth_fix_features, .ndo_set_features = ibmveth_set_features, diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 82744a7501c7..b0b6f90deb7d 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -58,8 +58,8 @@ config E1000 config E1000E tristate "Intel(R) PRO/1000 PCI-Express Gigabit Ethernet support" depends on PCI && (!SPARC32 || BROKEN) + depends on PTP_1588_CLOCK_OPTIONAL select CRC32 - imply PTP_1588_CLOCK help This driver supports the PCI-Express Intel(R) PRO/1000 gigabit ethernet family of adapters. For PCI or PCI-X e1000 adapters, @@ -87,7 +87,7 @@ config E1000E_HWTS config IGB tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support" depends on PCI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select I2C select I2C_ALGOBIT help @@ -159,9 +159,9 @@ config IXGB config IXGBE tristate "Intel(R) 10GbE PCI Express adapters support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select MDIO select PHYLIB - imply PTP_1588_CLOCK help This driver supports Intel(R) 10GbE PCI Express family of adapters. 
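(Editorial aside, not part of the patch: several hunks above convert old-style init_module()/cleanup_module() entry points and "probe(int unit)" helpers into static functions wired up with module_init()/module_exit(), as part of retiring the legacy netdev boot-setup probing (the removed netdev_boot_setup_check() calls and eth%d unit handling). The general shape of the conversion, with foo_* as hypothetical placeholder names:

	static struct net_device *dev_foo;

	static int __init foo_init(void)
	{
		dev_foo = foo_probe();		/* was foo_probe(-1) */
		return PTR_ERR_OR_ZERO(dev_foo);
	}
	module_init(foo_init);

	static void __exit foo_cleanup(void)
	{
		unregister_netdev(dev_foo);
		free_netdev(dev_foo);
	}
	module_exit(foo_cleanup);

The i82596 change above follows this pattern; sun3_82586 instead hooks its probe function to module_init() directly and returns an int. End of aside; the Kconfig help text continues below.)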
For more information on how to identify your adapter, go @@ -239,7 +239,7 @@ config IXGBEVF_IPSEC config I40E tristate "Intel(R) Ethernet Controller XL710 Family support" - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL depends on PCI select AUXILIARY_BUS help @@ -295,11 +295,11 @@ config ICE tristate "Intel(R) Ethernet Connection E800 Series Support" default n depends on PCI_MSI + depends on PTP_1588_CLOCK_OPTIONAL select AUXILIARY_BUS select DIMLIB select NET_DEVLINK select PLDMFW - imply PTP_1588_CLOCK help This driver supports Intel(R) Ethernet Connection E800 Series of devices. For more information on how to identify your adapter, go @@ -317,7 +317,7 @@ config FM10K tristate "Intel(R) FM10000 Ethernet Switch Host Interface Support" default n depends on PCI_MSI - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Intel(R) FM10000 Ethernet Switch Host Interface. For more information on how to identify your adapter, diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 1b0958bd24f6..373eb027b925 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -2715,10 +2715,10 @@ static void e100_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_TEST: - memcpy(data, *e100_gstrings_test, sizeof(e100_gstrings_test)); + memcpy(data, e100_gstrings_test, sizeof(e100_gstrings_test)); break; case ETH_SS_STATS: - memcpy(data, *e100_gstrings_stats, sizeof(e100_gstrings_stats)); + memcpy(data, e100_gstrings_stats, sizeof(e100_gstrings_stats)); break; } } @@ -2809,7 +2809,7 @@ static const struct net_device_ops e100_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = e100_set_multicast_list, .ndo_set_mac_address = e100_set_mac_address, - .ndo_do_ioctl = e100_do_ioctl, + .ndo_eth_ioctl = e100_do_ioctl, .ndo_tx_timeout = e100_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = e100_netpoll, diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c index 3c51ee94fa00..0a57172dfcbc 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c +++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c @@ -1739,7 +1739,9 @@ static int e1000_set_phys_id(struct net_device *netdev, } static int e1000_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -1755,7 +1757,9 @@ static int e1000_get_coalesce(struct net_device *netdev, } static int e1000_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index c2a109126c27..bed4f040face 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -832,7 +832,7 @@ static const struct net_device_ops e1000_netdev_ops = { .ndo_set_mac_address = e1000_set_mac, .ndo_tx_timeout = e1000_tx_timeout, .ndo_change_mtu = e1000_change_mtu, - .ndo_do_ioctl = e1000_ioctl, + .ndo_eth_ioctl = e1000_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = 
e1000_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c index 06442e6bef73..8515e00d1b40 100644 --- a/drivers/net/ethernet/intel/e1000e/ethtool.c +++ b/drivers/net/ethernet/intel/e1000e/ethtool.c @@ -903,6 +903,7 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: mask |= BIT(18); break; default: @@ -1569,6 +1570,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: fext_nvm11 = er32(FEXTNVM11); fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX; ew32(FEXTNVM11, fext_nvm11); @@ -1991,7 +1993,9 @@ static int e1000_set_phys_id(struct net_device *netdev, } static int e1000_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); @@ -2004,7 +2008,9 @@ static int e1000_get_coalesce(struct net_device *netdev, } static int e1000_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct e1000_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/intel/e1000e/hw.h b/drivers/net/ethernet/intel/e1000e/hw.h index db79c4e6413e..bcf680e83811 100644 --- a/drivers/net/ethernet/intel/e1000e/hw.h +++ b/drivers/net/ethernet/intel/e1000e/hw.h @@ -98,14 +98,22 @@ struct e1000_hw; #define E1000_DEV_ID_PCH_TGP_I219_V14 0x15FA #define E1000_DEV_ID_PCH_TGP_I219_LM15 0x15F4 #define E1000_DEV_ID_PCH_TGP_I219_V15 0x15F5 +#define E1000_DEV_ID_PCH_RPL_I219_LM23 0x0DC5 +#define E1000_DEV_ID_PCH_RPL_I219_V23 0x0DC6 #define E1000_DEV_ID_PCH_ADP_I219_LM16 0x1A1E #define E1000_DEV_ID_PCH_ADP_I219_V16 0x1A1F #define E1000_DEV_ID_PCH_ADP_I219_LM17 0x1A1C #define E1000_DEV_ID_PCH_ADP_I219_V17 0x1A1D +#define E1000_DEV_ID_PCH_RPL_I219_LM22 0x0DC7 +#define E1000_DEV_ID_PCH_RPL_I219_V22 0x0DC8 #define E1000_DEV_ID_PCH_MTP_I219_LM18 0x550A #define E1000_DEV_ID_PCH_MTP_I219_V18 0x550B #define E1000_DEV_ID_PCH_MTP_I219_LM19 0x550C #define E1000_DEV_ID_PCH_MTP_I219_V19 0x550D +#define E1000_DEV_ID_PCH_LNP_I219_LM20 0x550E +#define E1000_DEV_ID_PCH_LNP_I219_V20 0x550F +#define E1000_DEV_ID_PCH_LNP_I219_LM21 0x5510 +#define E1000_DEV_ID_PCH_LNP_I219_V21 0x5511 #define E1000_REVISION_4 4 @@ -132,6 +140,7 @@ enum e1000_mac_type { e1000_pch_tgp, e1000_pch_adp, e1000_pch_mtp, + e1000_pch_lnp, }; enum e1000_media_type { diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index cf7b3887da1d..60c582a16821 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -321,6 +321,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if (e1000_phy_is_accessible_pchlan(hw)) break; @@ -466,6 +467,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: /* In case the PHY needs to be in mdio slow mode, * set slow mode and try to get the PHY id again. 
*/ @@ -711,6 +713,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: case e1000_pchlan: /* check management mode */ mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan; @@ -1006,6 +1009,8 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) { u32 reg = link << (E1000_LTRV_REQ_SHIFT + E1000_LTRV_NOSNOOP_SHIFT) | link << E1000_LTRV_REQ_SHIFT | E1000_LTRV_SEND; + u16 max_ltr_enc_d = 0; /* maximum LTR decoded by platform */ + u16 lat_enc_d = 0; /* latency decoded */ u16 lat_enc = 0; /* latency encoded */ if (link) { @@ -1059,7 +1064,17 @@ static s32 e1000_platform_pm_pch_lpt(struct e1000_hw *hw, bool link) E1000_PCI_LTR_CAP_LPT + 2, &max_nosnoop); max_ltr_enc = max_t(u16, max_snoop, max_nosnoop); - if (lat_enc > max_ltr_enc) + lat_enc_d = (lat_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((lat_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + max_ltr_enc_d = (max_ltr_enc & E1000_LTRV_VALUE_MASK) * + (1U << (E1000_LTRV_SCALE_FACTOR * + ((max_ltr_enc & E1000_LTRV_SCALE_MASK) + >> E1000_LTRV_SCALE_SHIFT))); + + if (lat_enc_d > max_ltr_enc_d) lat_enc = max_ltr_enc; } @@ -1266,9 +1281,11 @@ static s32 e1000_disable_ulp_lpt_lp(struct e1000_hw *hw, bool force) usleep_range(10000, 11000); } if (firmware_bug) - e_warn("ULP_CONFIG_DONE took %dmsec. This is a firmware bug\n", i * 10); + e_warn("ULP_CONFIG_DONE took %d msec. This is a firmware bug\n", + i * 10); else - e_dbg("ULP_CONFIG_DONE cleared after %dmsec\n", i * 10); + e_dbg("ULP_CONFIG_DONE cleared after %d msec\n", + i * 10); if (force) { mac_reg = er32(H2ME); @@ -1663,6 +1680,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: rc = e1000_init_phy_params_pchlan(hw); break; default: @@ -2118,6 +2136,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M; break; default: @@ -3162,6 +3181,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: bank1_offset = nvm->flash_bank_size; act_offset = E1000_ICH_NVM_SIG_WORD; @@ -4101,6 +4121,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: word = NVM_COMPAT; valid_csum_mask = NVM_COMPAT_VALID_CSUM; break; @@ -4115,13 +4136,17 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw) return ret_val; if (!(data & valid_csum_mask)) { - data |= valid_csum_mask; - ret_val = e1000_write_nvm(hw, word, 1, &data); - if (ret_val) - return ret_val; - ret_val = e1000e_update_nvm_checksum(hw); - if (ret_val) - return ret_val; + e_dbg("NVM Checksum Invalid\n"); + + if (hw->mac.type < e1000_pch_cnp) { + data |= valid_csum_mask; + ret_val = e1000_write_nvm(hw, word, 1, &data); + if (ret_val) + return ret_val; + ret_val = e1000e_update_nvm_checksum(hw); + if (ret_val) + return ret_val; + } } return e1000e_validate_nvm_checksum_generic(hw); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.h b/drivers/net/ethernet/intel/e1000e/ich8lan.h index 1502895eb45d..d6a092e5ee74 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.h +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.h @@ -41,12 +41,15 @@ #define 
E1000_FWSM_WLOCK_MAC_MASK 0x0380 #define E1000_FWSM_WLOCK_MAC_SHIFT 7 #define E1000_FWSM_ULP_CFG_DONE 0x00000400 /* Low power cfg done */ +#define E1000_EXFWSM_DPG_EXIT_DONE 0x00000001 /* Shared Receive Address Registers */ #define E1000_SHRAL_PCH_LPT(_i) (0x05408 + ((_i) * 8)) #define E1000_SHRAH_PCH_LPT(_i) (0x0540C + ((_i) * 8)) #define E1000_H2ME 0x05B50 /* Host to ME */ +#define E1000_H2ME_START_DPG 0x00000001 /* indicate the ME of DPG */ +#define E1000_H2ME_EXIT_DPG 0x00000002 /* indicate the ME exit DPG */ #define E1000_H2ME_ULP 0x00000800 /* ULP Indication Bit */ #define E1000_H2ME_ENFORCE_SETTINGS 0x00001000 /* Enforce Settings */ @@ -274,8 +277,11 @@ /* Latency Tolerance Reporting */ #define E1000_LTRV 0x000F8 +#define E1000_LTRV_VALUE_MASK 0x000003FF #define E1000_LTRV_SCALE_MAX 5 #define E1000_LTRV_SCALE_FACTOR 5 +#define E1000_LTRV_SCALE_SHIFT 10 +#define E1000_LTRV_SCALE_MASK 0x00001C00 #define E1000_LTRV_REQ_SHIFT 15 #define E1000_LTRV_NOSNOOP_SHIFT 16 #define E1000_LTRV_SEND (1 << 30) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 757a54c39eef..900b3ab998bd 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -3550,6 +3550,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) { /* Stable 24MHz frequency */ incperiod = INCPERIOD_24MHZ; @@ -4068,6 +4069,7 @@ void e1000e_reset(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: fc->refresh_time = 0xFFFF; fc->pause_time = 0xFFFF; @@ -6343,42 +6345,110 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) u32 mac_data; u16 phy_data; - /* Disable the periodic inband message, - * don't request PCIe clock in K1 page770_17[10:9] = 10b - */ - e1e_rphy(hw, HV_PM_CTRL, &phy_data); - phy_data &= ~HV_PM_CTRL_K1_CLK_REQ; - phy_data |= BIT(10); - e1e_wphy(hw, HV_PM_CTRL, phy_data); + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME configure the device for S0ix */ + mac_data = er32(H2ME); + mac_data |= E1000_H2ME_START_DPG; + mac_data &= ~E1000_H2ME_EXIT_DPG; + ew32(H2ME, mac_data); + } else { + /* Request driver configure the device to S0ix */ + /* Disable the periodic inband message, + * don't request PCIe clock in K1 page770_17[10:9] = 10b + */ + e1e_rphy(hw, HV_PM_CTRL, &phy_data); + phy_data &= ~HV_PM_CTRL_K1_CLK_REQ; + phy_data |= BIT(10); + e1e_wphy(hw, HV_PM_CTRL, phy_data); - /* Make sure we don't exit K1 every time a new packet arrives - * 772_29[5] = 1 CS_Mode_Stay_In_K1 - */ - e1e_rphy(hw, I217_CGFREG, &phy_data); - phy_data |= BIT(5); - e1e_wphy(hw, I217_CGFREG, phy_data); + /* Make sure we don't exit K1 every time a new packet arrives + * 772_29[5] = 1 CS_Mode_Stay_In_K1 + */ + e1e_rphy(hw, I217_CGFREG, &phy_data); + phy_data |= BIT(5); + e1e_wphy(hw, I217_CGFREG, phy_data); - /* Change the MAC/PHY interface to SMBus - * Force the SMBus in PHY page769_23[0] = 1 - * Force the SMBus in MAC CTRL_EXT[11] = 1 - */ - e1e_rphy(hw, CV_SMB_CTRL, &phy_data); - phy_data |= CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy(hw, CV_SMB_CTRL, phy_data); - mac_data = er32(CTRL_EXT); - mac_data |= E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_data); + /* Change the MAC/PHY interface to SMBus + * Force the SMBus in PHY page769_23[0] = 1 + * Force the SMBus in MAC CTRL_EXT[11] = 1 + */ + e1e_rphy(hw, 
CV_SMB_CTRL, &phy_data); + phy_data |= CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy(hw, CV_SMB_CTRL, phy_data); + mac_data = er32(CTRL_EXT); + mac_data |= E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_data); + + /* DFT control: PHY bit: page769_20[0] = 1 + * page769_20[7] - PHY PLL stop + * page769_20[8] - PHY go to the electrical idle + * page769_20[9] - PHY serdes disable + * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 + */ + e1e_rphy(hw, I82579_DFT_CTRL, &phy_data); + phy_data |= BIT(0); + phy_data |= BIT(7); + phy_data |= BIT(8); + phy_data |= BIT(9); + e1e_wphy(hw, I82579_DFT_CTRL, phy_data); + + mac_data = er32(EXTCNF_CTRL); + mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; + ew32(EXTCNF_CTRL, mac_data); + + /* Enable the Dynamic Power Gating in the MAC */ + mac_data = er32(FEXTNVM7); + mac_data |= BIT(22); + ew32(FEXTNVM7, mac_data); + + /* Disable disconnected cable conditioning for Power Gating */ + mac_data = er32(DPGFR); + mac_data |= BIT(2); + ew32(DPGFR, mac_data); + + /* Don't wake from dynamic Power Gating with clock request */ + mac_data = er32(FEXTNVM12); + mac_data |= BIT(12); + ew32(FEXTNVM12, mac_data); + + /* Ungate PGCB clock */ + mac_data = er32(FEXTNVM9); + mac_data &= ~BIT(28); + ew32(FEXTNVM9, mac_data); + + /* Enable K1 off to enable mPHY Power Gating */ + mac_data = er32(FEXTNVM6); + mac_data |= BIT(31); + ew32(FEXTNVM6, mac_data); + + /* Enable mPHY power gating for any link and speed */ + mac_data = er32(FEXTNVM8); + mac_data |= BIT(9); + ew32(FEXTNVM8, mac_data); + + /* Enable the Dynamic Clock Gating in the DMA and MAC */ + mac_data = er32(CTRL_EXT); + mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN; + ew32(CTRL_EXT, mac_data); + + /* No MAC DPG gating SLP_S0 in modern standby + * Switch the logic of the lanphypc to use PMC counter + */ + mac_data = er32(FEXTNVM5); + mac_data |= BIT(7); + ew32(FEXTNVM5, mac_data); + } - /* DFT control: PHY bit: page769_20[0] = 1 - * Gate PPW via EXTCNF_CTRL - set 0x0F00[7] = 1 - */ - e1e_rphy(hw, I82579_DFT_CTRL, &phy_data); - phy_data |= BIT(0); - e1e_wphy(hw, I82579_DFT_CTRL, phy_data); + /* Disable the time synchronization clock */ + mac_data = er32(FEXTNVM7); + mac_data |= BIT(31); + mac_data &= ~BIT(0); + ew32(FEXTNVM7, mac_data); - mac_data = er32(EXTCNF_CTRL); - mac_data |= E1000_EXTCNF_CTRL_GATE_PHY_CFG; - ew32(EXTCNF_CTRL, mac_data); + /* Dynamic Power Gating Enable */ + mac_data = er32(CTRL_EXT); + mac_data |= BIT(3); + ew32(CTRL_EXT, mac_data); /* Check MAC Tx/Rx packet buffer pointers. 
* Reset MAC Tx/Rx packet buffer pointers to suppress any @@ -6414,148 +6484,130 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) mac_data = er32(RDFPC); if (mac_data) ew32(RDFPC, 0); - - /* Enable the Dynamic Power Gating in the MAC */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(22); - ew32(FEXTNVM7, mac_data); - - /* Disable the time synchronization clock */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(31); - mac_data &= ~BIT(0); - ew32(FEXTNVM7, mac_data); - - /* Dynamic Power Gating Enable */ - mac_data = er32(CTRL_EXT); - mac_data |= BIT(3); - ew32(CTRL_EXT, mac_data); - - /* Disable disconnected cable conditioning for Power Gating */ - mac_data = er32(DPGFR); - mac_data |= BIT(2); - ew32(DPGFR, mac_data); - - /* Don't wake from dynamic Power Gating with clock request */ - mac_data = er32(FEXTNVM12); - mac_data |= BIT(12); - ew32(FEXTNVM12, mac_data); - - /* Ungate PGCB clock */ - mac_data = er32(FEXTNVM9); - mac_data &= ~BIT(28); - ew32(FEXTNVM9, mac_data); - - /* Enable K1 off to enable mPHY Power Gating */ - mac_data = er32(FEXTNVM6); - mac_data |= BIT(31); - ew32(FEXTNVM6, mac_data); - - /* Enable mPHY power gating for any link and speed */ - mac_data = er32(FEXTNVM8); - mac_data |= BIT(9); - ew32(FEXTNVM8, mac_data); - - /* Enable the Dynamic Clock Gating in the DMA and MAC */ - mac_data = er32(CTRL_EXT); - mac_data |= E1000_CTRL_EXT_DMA_DYN_CLK_EN; - ew32(CTRL_EXT, mac_data); - - /* No MAC DPG gating SLP_S0 in modern standby - * Switch the logic of the lanphypc to use PMC counter - */ - mac_data = er32(FEXTNVM5); - mac_data |= BIT(7); - ew32(FEXTNVM5, mac_data); } static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) { struct e1000_hw *hw = &adapter->hw; + bool firmware_bug = false; u32 mac_data; u16 phy_data; + u32 i = 0; + + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + /* Request ME unconfigure the device from S0ix */ + mac_data = er32(H2ME); + mac_data &= ~E1000_H2ME_START_DPG; + mac_data |= E1000_H2ME_EXIT_DPG; + ew32(H2ME, mac_data); + + /* Poll up to 2.5 seconds for ME to unconfigure DPG. + * If this takes more than 1 second, show a warning indicating a + * firmware bug + */ + while (!(er32(EXFWSM) & E1000_EXFWSM_DPG_EXIT_DONE)) { + if (i > 100 && !firmware_bug) + firmware_bug = true; - /* Disable the Dynamic Power Gating in the MAC */ - mac_data = er32(FEXTNVM7); - mac_data &= 0xFFBFFFFF; - ew32(FEXTNVM7, mac_data); + if (i++ == 250) { + e_dbg("Timeout (firmware bug): %d msec\n", + i * 10); + break; + } - /* Enable the time synchronization clock */ - mac_data = er32(FEXTNVM7); - mac_data |= BIT(0); - ew32(FEXTNVM7, mac_data); + usleep_range(10000, 11000); + } + if (firmware_bug) + e_warn("DPG_EXIT_DONE took %d msec. 
This is a firmware bug\n", + i * 10); + else + e_dbg("DPG_EXIT_DONE cleared after %d msec\n", i * 10); + } else { + /* Request driver unconfigure the device from S0ix */ + + /* Disable the Dynamic Power Gating in the MAC */ + mac_data = er32(FEXTNVM7); + mac_data &= 0xFFBFFFFF; + ew32(FEXTNVM7, mac_data); + + /* Disable mPHY power gating for any link and speed */ + mac_data = er32(FEXTNVM8); + mac_data &= ~BIT(9); + ew32(FEXTNVM8, mac_data); + + /* Disable K1 off */ + mac_data = er32(FEXTNVM6); + mac_data &= ~BIT(31); + ew32(FEXTNVM6, mac_data); + + /* Disable Ungate PGCB clock */ + mac_data = er32(FEXTNVM9); + mac_data |= BIT(28); + ew32(FEXTNVM9, mac_data); + + /* Cancel not waking from dynamic + * Power Gating with clock request + */ + mac_data = er32(FEXTNVM12); + mac_data &= ~BIT(12); + ew32(FEXTNVM12, mac_data); - /* Disable mPHY power gating for any link and speed */ - mac_data = er32(FEXTNVM8); - mac_data &= ~BIT(9); - ew32(FEXTNVM8, mac_data); + /* Cancel disable disconnected cable conditioning + * for Power Gating + */ + mac_data = er32(DPGFR); + mac_data &= ~BIT(2); + ew32(DPGFR, mac_data); - /* Disable K1 off */ - mac_data = er32(FEXTNVM6); - mac_data &= ~BIT(31); - ew32(FEXTNVM6, mac_data); + /* Disable the Dynamic Clock Gating in the DMA and MAC */ + mac_data = er32(CTRL_EXT); + mac_data &= 0xFFF7FFFF; + ew32(CTRL_EXT, mac_data); - /* Disable Ungate PGCB clock */ - mac_data = er32(FEXTNVM9); - mac_data |= BIT(28); - ew32(FEXTNVM9, mac_data); + /* Revert the lanphypc logic to use the internal Gbe counter + * and not the PMC counter + */ + mac_data = er32(FEXTNVM5); + mac_data &= 0xFFFFFF7F; + ew32(FEXTNVM5, mac_data); - /* Cancel not waking from dynamic - * Power Gating with clock request - */ - mac_data = er32(FEXTNVM12); - mac_data &= ~BIT(12); - ew32(FEXTNVM12, mac_data); + /* Enable the periodic inband message, + * Request PCIe clock in K1 page770_17[10:9] =01b + */ + e1e_rphy(hw, HV_PM_CTRL, &phy_data); + phy_data &= 0xFBFF; + phy_data |= HV_PM_CTRL_K1_CLK_REQ; + e1e_wphy(hw, HV_PM_CTRL, phy_data); - /* Cancel disable disconnected cable conditioning - * for Power Gating - */ - mac_data = er32(DPGFR); - mac_data &= ~BIT(2); - ew32(DPGFR, mac_data); + /* Return back configuration + * 772_29[5] = 0 CS_Mode_Stay_In_K1 + */ + e1e_rphy(hw, I217_CGFREG, &phy_data); + phy_data &= 0xFFDF; + e1e_wphy(hw, I217_CGFREG, phy_data); + + /* Change the MAC/PHY interface to Kumeran + * Unforce the SMBus in PHY page769_23[0] = 0 + * Unforce the SMBus in MAC CTRL_EXT[11] = 0 + */ + e1e_rphy(hw, CV_SMB_CTRL, &phy_data); + phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS; + e1e_wphy(hw, CV_SMB_CTRL, phy_data); + mac_data = er32(CTRL_EXT); + mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS; + ew32(CTRL_EXT, mac_data); + } /* Disable Dynamic Power Gating */ mac_data = er32(CTRL_EXT); mac_data &= 0xFFFFFFF7; ew32(CTRL_EXT, mac_data); - /* Disable the Dynamic Clock Gating in the DMA and MAC */ - mac_data = er32(CTRL_EXT); - mac_data &= 0xFFF7FFFF; - ew32(CTRL_EXT, mac_data); - - /* Revert the lanphypc logic to use the internal Gbe counter - * and not the PMC counter - */ - mac_data = er32(FEXTNVM5); - mac_data &= 0xFFFFFF7F; - ew32(FEXTNVM5, mac_data); - - /* Enable the periodic inband message, - * Request PCIe clock in K1 page770_17[10:9] =01b - */ - e1e_rphy(hw, HV_PM_CTRL, &phy_data); - phy_data &= 0xFBFF; - phy_data |= HV_PM_CTRL_K1_CLK_REQ; - e1e_wphy(hw, HV_PM_CTRL, phy_data); - - /* Return back configuration - * 772_29[5] = 0 CS_Mode_Stay_In_K1 - */ - e1e_rphy(hw, I217_CGFREG, &phy_data); - phy_data &= 0xFFDF; 
- e1e_wphy(hw, I217_CGFREG, phy_data); - - /* Change the MAC/PHY interface to Kumeran - * Unforce the SMBus in PHY page769_23[0] = 0 - * Unforce the SMBus in MAC CTRL_EXT[11] = 0 - */ - e1e_rphy(hw, CV_SMB_CTRL, &phy_data); - phy_data &= ~CV_SMB_CTRL_FORCE_SMBUS; - e1e_wphy(hw, CV_SMB_CTRL, phy_data); - mac_data = er32(CTRL_EXT); - mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS; - ew32(CTRL_EXT, mac_data); + /* Enable the time synchronization clock */ + mac_data = er32(FEXTNVM7); + mac_data &= ~BIT(31); + mac_data |= BIT(0); + ew32(FEXTNVM7, mac_data); } static int e1000e_pm_freeze(struct device *dev) @@ -7302,7 +7354,7 @@ static const struct net_device_ops e1000e_netdev_ops = { .ndo_set_rx_mode = e1000e_set_rx_mode, .ndo_set_mac_address = e1000_set_mac, .ndo_change_mtu = e1000_change_mtu, - .ndo_do_ioctl = e1000_ioctl, + .ndo_eth_ioctl = e1000_ioctl, .ndo_tx_timeout = e1000_tx_timeout, .ndo_validate_addr = eth_validate_addr, @@ -7677,7 +7729,7 @@ err_dma: * @pdev: PCI device information struct * * e1000_remove is called by the PCI subsystem to alert the driver - * that it should release a PCI device. The could be caused by a + * that it should release a PCI device. This could be caused by a * Hot-Plug event, or because the driver is going to be removed from * memory. **/ @@ -7850,14 +7902,22 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_cnp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_cnp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_cnp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 9e79d672f4f1..eb5c014c02fb 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -298,6 +298,7 @@ void e1000e_ptp_init(struct e1000_adapter *adapter) case e1000_pch_tgp: case e1000_pch_adp: case e1000_pch_mtp: + case e1000_pch_lnp: if ((hw->mac.type < e1000_pch_lpt) || (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) { adapter->ptp_clock_info.max_adj = 24000000 - 1; diff --git a/drivers/net/ethernet/intel/e1000e/regs.h b/drivers/net/ethernet/intel/e1000e/regs.h index 8165ba2619a4..6c0cd8cab3ef 100644 --- a/drivers/net/ethernet/intel/e1000e/regs.h +++ b/drivers/net/ethernet/intel/e1000e/regs.h @@ -213,6 +213,7 @@ #define E1000_FACTPS 
0x05B30 /* Function Active and Power State to MNG */ #define E1000_SWSM 0x05B50 /* SW Semaphore */ #define E1000_FWSM 0x05B54 /* FW Semaphore */ +#define E1000_EXFWSM 0x05B58 /* Extended FW Semaphore */ /* Driver-only SW semaphore (not used by BOOT agents) */ #define E1000_SWSM2 0x05B58 #define E1000_FFLT_DBG 0x05F04 /* Debug Register */ diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c index 66776ba7bfb6..0d37f011d0ce 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c @@ -632,7 +632,9 @@ clear_reset: } static int fm10k_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(dev); @@ -646,7 +648,9 @@ static int fm10k_get_coalesce(struct net_device *dev, } static int fm10k_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct fm10k_intfc *interface = netdev_priv(dev); u16 tx_itr, rx_itr; diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index b9417dc0007c..39fb3d57c057 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -428,6 +428,8 @@ struct i40e_channel { struct i40e_vsi *parent_vsi; }; +struct i40e_ptp_pins_settings; + static inline bool i40e_is_channel_macvlan(struct i40e_channel *ch) { return !!ch->fwd; @@ -644,12 +646,83 @@ struct i40e_pf { struct i40e_rx_pb_config pb_cfg; /* Current Rx packet buffer config */ struct i40e_dcbx_config tmp_cfg; +/* GPIO defines used by PTP */ +#define I40E_SDP3_2 18 +#define I40E_SDP3_3 19 +#define I40E_GPIO_4 20 +#define I40E_LED2_0 26 +#define I40E_LED2_1 27 +#define I40E_LED3_0 28 +#define I40E_LED3_1 29 +#define I40E_GLGEN_GPIO_SET_SDP_DATA_HI \ + (1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT) +#define I40E_GLGEN_GPIO_SET_DRV_SDP_DATA \ + (1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_0 \ + (0 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_1 \ + (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) +#define I40E_GLGEN_GPIO_CTL_RESERVED BIT(2) +#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z \ + (1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_DIR_OUT \ + (1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TRI_DRV_HI \ + (1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_HI_RST \ + (1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TIMESYNC_0 \ + (3 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_TIMESYNC_1 \ + (4 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) +#define I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN \ + (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT \ + (1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0 \ + (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0) +#define I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0 \ + (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_0 | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1) +#define I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1 \ + 
(I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \ + I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_0) +#define I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1 \ + (I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN | \ + I40E_GLGEN_GPIO_CTL_TIMESYNC_1 | I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \ + I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | I40E_GLGEN_GPIO_CTL_DIR_OUT | \ + I40E_GLGEN_GPIO_CTL_RESERVED | I40E_GLGEN_GPIO_CTL_PRT_NUM_1) +#define I40E_GLGEN_GPIO_CTL_LED_INIT \ + (I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_Z | \ + I40E_GLGEN_GPIO_CTL_DIR_OUT | \ + I40E_GLGEN_GPIO_CTL_TRI_DRV_HI | \ + I40E_GLGEN_GPIO_CTL_OUT_HI_RST | \ + I40E_GLGEN_GPIO_CTL_OUT_DEFAULT | \ + I40E_GLGEN_GPIO_CTL_NOT_FOR_PHY_CONN) +#define I40E_PRTTSYN_AUX_1_INSTNT \ + (1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUT_ENABLE \ + (1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUT_CLK_MOD (3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT) +#define I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD \ + (I40E_PRTTSYN_AUX_0_OUT_ENABLE | I40E_PRTTSYN_AUX_0_OUT_CLK_MOD) +#define I40E_PTP_HALF_SECOND 500000000LL /* nano seconds */ +#define I40E_PTP_2_SEC_DELAY 2 + struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; struct sk_buff *ptp_tx_skb; unsigned long ptp_tx_start; struct hwtstamp_config tstamp_config; struct timespec64 ptp_prev_hw_time; + struct work_struct ptp_pps_work; + struct work_struct ptp_extts0_work; + struct work_struct ptp_extts1_work; ktime_t ptp_reset_start; struct mutex tmreg_lock; /* Used to protect the SYSTIME registers. */ u32 ptp_adj_mult; @@ -657,10 +730,14 @@ struct i40e_pf { u32 tx_hwtstamp_skipped; u32 rx_hwtstamp_cleared; u32 latch_event_flags; + u64 ptp_pps_start; + u32 pps_delay; spinlock_t ptp_rx_lock; /* Used to protect Rx timestamp registers. */ + struct ptp_pin_desc ptp_pin[3]; unsigned long latch_events[4]; bool ptp_tx; bool ptp_rx; + struct i40e_ptp_pins_settings *ptp_pins; u16 rss_table_size; /* HW RSS table size */ u32 max_bw; u32 min_bw; @@ -1169,6 +1246,7 @@ void i40e_ptp_save_hw_time(struct i40e_pf *pf); void i40e_ptp_restore_hw_time(struct i40e_pf *pf); void i40e_ptp_init(struct i40e_pf *pf); void i40e_ptp_stop(struct i40e_pf *pf); +int i40e_ptp_alloc_pins(struct i40e_pf *pf); int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi); i40e_status i40e_get_partition_bw_setting(struct i40e_pf *pf); i40e_status i40e_set_partition_bw_setting(struct i40e_pf *pf); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 2c9e4eeb7270..513ba6974355 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2812,13 +2812,17 @@ static int __i40e_get_coalesce(struct net_device *netdev, * i40e_get_coalesce - get a netdev's coalesce settings * @netdev: the netdev to check * @ec: ethtool coalesce data structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Gets the coalesce settings for a particular netdev. Note that if user has * modified per-queue settings, this only guarantees to represent queue 0. See * __i40e_get_coalesce for more details. 
**/ static int i40e_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __i40e_get_coalesce(netdev, ec, -1); } @@ -2986,11 +2990,15 @@ static int __i40e_set_coalesce(struct net_device *netdev, * i40e_set_coalesce - set coalesce settings for every queue on the netdev * @netdev: the netdev to change * @ec: ethtool coalesce settings + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * This will set each queue to the same coalesce settings. **/ static int i40e_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __i40e_set_coalesce(netdev, ec, -1); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 1d1f52756a93..2f20980dd9a5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -4079,10 +4079,13 @@ static irqreturn_t i40e_intr(int irq, void *data) if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) { u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0); - if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) { - icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_EVENT0_MASK) + schedule_work(&pf->ptp_extts0_work); + + if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) i40e_ptp_tx_hwtstamp(pf); - } + + icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK; } /* If a critical error is pending we have no choice but to reset the @@ -4635,7 +4638,7 @@ void i40e_vsi_stop_rings(struct i40e_vsi *vsi) err = i40e_control_wait_rx_q(pf, pf_q, false); if (err) dev_info(&pf->pdev->dev, - "VSI seid %d Rx ring %d dissable timeout\n", + "VSI seid %d Rx ring %d disable timeout\n", vsi->seid, pf_q); } @@ -13265,7 +13268,7 @@ static const struct net_device_ops i40e_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = i40e_set_mac, .ndo_change_mtu = i40e_change_mtu, - .ndo_do_ioctl = i40e_ioctl, + .ndo_eth_ioctl = i40e_ioctl, .ndo_tx_timeout = i40e_tx_timeout, .ndo_vlan_rx_add_vid = i40e_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = i40e_vlan_rx_kill_vid, @@ -15181,6 +15184,22 @@ err_switch_setup: } /** + * i40e_set_subsystem_device_id - set subsystem device id + * @hw: pointer to the hardware info + * + * Set PCI subsystem device id either from a pci_dev structure or + * a specific FW register. + **/ +static inline void i40e_set_subsystem_device_id(struct i40e_hw *hw) +{ + struct pci_dev *pdev = ((struct i40e_pf *)hw->back)->pdev; + + hw->subsystem_device_id = pdev->subsystem_device ? 
+ pdev->subsystem_device : + (ushort)(rd32(hw, I40E_PFPCI_SUBSYSID) & USHRT_MAX); +} + +/** * i40e_probe - Device initialization routine * @pdev: PCI device information struct * @ent: entry in i40e_pci_tbl @@ -15275,7 +15294,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) hw->device_id = pdev->device; pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); hw->subsystem_vendor_id = pdev->subsystem_vendor; - hw->subsystem_device_id = pdev->subsystem_device; + i40e_set_subsystem_device_id(hw); hw->bus.device = PCI_SLOT(pdev->devfn); hw->bus.func = PCI_FUNC(pdev->devfn); hw->bus.bus_id = pdev->bus->number; @@ -15455,6 +15474,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (is_valid_ether_addr(hw->mac.port_addr)) pf->hw_features |= I40E_HW_PORT_ID_VALID; + i40e_ptp_alloc_pins(pf); pci_set_drvdata(pdev, pf); pci_save_state(pdev); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 7b971b205d36..09b1d5aed1c9 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -3,6 +3,7 @@ #include "i40e.h" #include <linux/ptp_classify.h> +#include <linux/posix-clock.h> /* The XL710 timesync is very much like Intel's 82599 design when it comes to * the fundamental clock design. However, the clock operations are much simpler @@ -20,10 +21,252 @@ #define I40E_PTP_10GB_INCVAL_MULT 2 #define I40E_PTP_5GB_INCVAL_MULT 2 #define I40E_PTP_1GB_INCVAL_MULT 20 +#define I40E_ISGN 0x80000000 #define I40E_PRTTSYN_CTL1_TSYNTYPE_V1 BIT(I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) #define I40E_PRTTSYN_CTL1_TSYNTYPE_V2 (2 << \ I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT) +#define I40E_SUBDEV_ID_25G_PTP_PIN 0xB +#define to_dev(obj) container_of(obj, struct device, kobj) + +enum i40e_ptp_pin { + SDP3_2 = 0, + SDP3_3, + GPIO_4 +}; + +enum i40e_can_set_pins_t { + CANT_DO_PINS = -1, + CAN_SET_PINS, + CAN_DO_PINS +}; + +static struct ptp_pin_desc sdp_desc[] = { + /* name idx func chan */ + {"SDP3_2", SDP3_2, PTP_PF_NONE, 0}, + {"SDP3_3", SDP3_3, PTP_PF_NONE, 1}, + {"GPIO_4", GPIO_4, PTP_PF_NONE, 1}, +}; + +enum i40e_ptp_gpio_pin_state { + end = -2, + invalid, + off, + in_A, + in_B, + out_A, + out_B, +}; + +static const char * const i40e_ptp_gpio_pin_state2str[] = { + "off", "in_A", "in_B", "out_A", "out_B" +}; + +enum i40e_ptp_led_pin_state { + led_end = -2, + low = 0, + high, +}; + +struct i40e_ptp_pins_settings { + enum i40e_ptp_gpio_pin_state sdp3_2; + enum i40e_ptp_gpio_pin_state sdp3_3; + enum i40e_ptp_gpio_pin_state gpio_4; + enum i40e_ptp_led_pin_state led2_0; + enum i40e_ptp_led_pin_state led2_1; + enum i40e_ptp_led_pin_state led3_0; + enum i40e_ptp_led_pin_state led3_1; +}; + +static const struct i40e_ptp_pins_settings + i40e_ptp_pin_led_allowed_states[] = { + {off, off, off, high, high, high, high}, + {off, in_A, off, high, high, high, low}, + {off, out_A, off, high, low, high, high}, + {off, in_B, off, high, high, high, low}, + {off, out_B, off, high, low, high, high}, + {in_A, off, off, high, high, high, low}, + {in_A, in_B, off, high, high, high, low}, + {in_A, out_B, off, high, low, high, high}, + {out_A, off, off, high, low, high, high}, + {out_A, in_B, off, high, low, high, high}, + {in_B, off, off, high, high, high, low}, + {in_B, in_A, off, high, high, high, low}, + {in_B, out_A, off, high, low, high, high}, + {out_B, off, off, high, low, high, high}, + {out_B, in_A, off, high, low, high, high}, + {off, off, in_A, high, high, low, high}, + {off, out_A, 
in_A, high, low, low, high}, + {off, in_B, in_A, high, high, low, low}, + {off, out_B, in_A, high, low, low, high}, + {out_A, off, in_A, high, low, low, high}, + {out_A, in_B, in_A, high, low, low, high}, + {in_B, off, in_A, high, high, low, low}, + {in_B, out_A, in_A, high, low, low, high}, + {out_B, off, in_A, high, low, low, high}, + {off, off, out_A, low, high, high, high}, + {off, in_A, out_A, low, high, high, low}, + {off, in_B, out_A, low, high, high, low}, + {off, out_B, out_A, low, low, high, high}, + {in_A, off, out_A, low, high, high, low}, + {in_A, in_B, out_A, low, high, high, low}, + {in_A, out_B, out_A, low, low, high, high}, + {in_B, off, out_A, low, high, high, low}, + {in_B, in_A, out_A, low, high, high, low}, + {out_B, off, out_A, low, low, high, high}, + {out_B, in_A, out_A, low, low, high, high}, + {off, off, in_B, high, high, low, high}, + {off, in_A, in_B, high, high, low, low}, + {off, out_A, in_B, high, low, low, high}, + {off, out_B, in_B, high, low, low, high}, + {in_A, off, in_B, high, high, low, low}, + {in_A, out_B, in_B, high, low, low, high}, + {out_A, off, in_B, high, low, low, high}, + {out_B, off, in_B, high, low, low, high}, + {out_B, in_A, in_B, high, low, low, high}, + {off, off, out_B, low, high, high, high}, + {off, in_A, out_B, low, high, high, low}, + {off, out_A, out_B, low, low, high, high}, + {off, in_B, out_B, low, high, high, low}, + {in_A, off, out_B, low, high, high, low}, + {in_A, in_B, out_B, low, high, high, low}, + {out_A, off, out_B, low, low, high, high}, + {out_A, in_B, out_B, low, low, high, high}, + {in_B, off, out_B, low, high, high, low}, + {in_B, in_A, out_B, low, high, high, low}, + {in_B, out_A, out_B, low, low, high, high}, + {end, end, end, led_end, led_end, led_end, led_end} +}; + +static int i40e_ptp_set_pins(struct i40e_pf *pf, + struct i40e_ptp_pins_settings *pins); + +/** + * i40e_ptp_extts0_work - workqueue task function + * @work: workqueue task structure + * + * Service for PTP external clock event + **/ +static void i40e_ptp_extts0_work(struct work_struct *work) +{ + struct i40e_pf *pf = container_of(work, struct i40e_pf, + ptp_extts0_work); + struct i40e_hw *hw = &pf->hw; + struct ptp_clock_event event; + u32 hi, lo; + + /* Event time is captured by one of the two matched registers + * PRTTSYN_EVNT_L: 32 LSB of sampled time event + * PRTTSYN_EVNT_H: 32 MSB of sampled time event + * Event is defined in PRTTSYN_EVNT_0 register + */ + lo = rd32(hw, I40E_PRTTSYN_EVNT_L(0)); + hi = rd32(hw, I40E_PRTTSYN_EVNT_H(0)); + + event.timestamp = (((u64)hi) << 32) | lo; + + event.type = PTP_CLOCK_EXTTS; + event.index = hw->pf_id; + + /* fire event */ + ptp_clock_event(pf->ptp_clock, &event); +} + +/** + * i40e_is_ptp_pin_dev - check if device supports PTP pins + * @hw: pointer to the hardware structure + * + * Return true if device supports PTP pins, false otherwise. + **/ +static bool i40e_is_ptp_pin_dev(struct i40e_hw *hw) +{ + return hw->device_id == I40E_DEV_ID_25G_SFP28 && + hw->subsystem_device_id == I40E_SUBDEV_ID_25G_PTP_PIN; +} + +/** + * i40e_can_set_pins - check possibility of manipulating the pins + * @pf: board private structure + * + * Check if all conditions are satisfied to manipulate PTP pins. + * Return CAN_SET_PINS if pins can be set on a specific PF or + * return CAN_DO_PINS if pins can be manipulated within a NIC or + * return CANT_DO_PINS otherwise. 
+ **/ +static enum i40e_can_set_pins_t i40e_can_set_pins(struct i40e_pf *pf) +{ + if (!i40e_is_ptp_pin_dev(&pf->hw)) { + dev_warn(&pf->pdev->dev, + "PTP external clock not supported.\n"); + return CANT_DO_PINS; + } + + if (!pf->ptp_pins) { + dev_warn(&pf->pdev->dev, + "PTP PIN manipulation not allowed.\n"); + return CANT_DO_PINS; + } + + if (pf->hw.pf_id) { + dev_warn(&pf->pdev->dev, + "PTP PINs should be accessed via PF0.\n"); + return CAN_DO_PINS; + } + + return CAN_SET_PINS; +} + +/** + * i40_ptp_reset_timing_events - Reset PTP timing events + * @pf: Board private structure + * + * This function resets timing events for pf. + **/ +static void i40_ptp_reset_timing_events(struct i40e_pf *pf) +{ + u32 i; + + spin_lock_bh(&pf->ptp_rx_lock); + for (i = 0; i <= I40E_PRTTSYN_RXTIME_L_MAX_INDEX; i++) { + /* reading and automatically clearing timing events registers */ + rd32(&pf->hw, I40E_PRTTSYN_RXTIME_L(i)); + rd32(&pf->hw, I40E_PRTTSYN_RXTIME_H(i)); + pf->latch_events[i] = 0; + } + /* reading and automatically clearing timing events registers */ + rd32(&pf->hw, I40E_PRTTSYN_TXTIME_L); + rd32(&pf->hw, I40E_PRTTSYN_TXTIME_H); + + pf->tx_hwtstamp_timeouts = 0; + pf->tx_hwtstamp_skipped = 0; + pf->rx_hwtstamp_cleared = 0; + pf->latch_event_flags = 0; + spin_unlock_bh(&pf->ptp_rx_lock); +} + +/** + * i40e_ptp_verify - check pins + * @ptp: ptp clock + * @pin: pin index + * @func: assigned function + * @chan: channel + * + * Check pins consistency. + * Return 0 on success or error on failure. + **/ +static int i40e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin, + enum ptp_pin_function func, unsigned int chan) +{ + switch (func) { + case PTP_PF_NONE: + case PTP_PF_EXTTS: + case PTP_PF_PEROUT: + break; + case PTP_PF_PHYSYNC: + return -EOPNOTSUPP; + } + return 0; +} /** * i40e_ptp_read - Read the PHC time from the device @@ -137,6 +380,37 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) } /** + * i40e_ptp_set_1pps_signal_hw - configure 1PPS PTP signal for pins + * @pf: the PF private data structure + * + * Configure 1PPS signal used for PTP pins + **/ +static void i40e_ptp_set_1pps_signal_hw(struct i40e_pf *pf) +{ + struct i40e_hw *hw = &pf->hw; + struct timespec64 now; + u64 ns; + + wr32(hw, I40E_PRTTSYN_AUX_0(1), 0); + wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT); + wr32(hw, I40E_PRTTSYN_AUX_0(1), I40E_PRTTSYN_AUX_0_OUT_ENABLE); + + i40e_ptp_read(pf, &now, NULL); + now.tv_sec += I40E_PTP_2_SEC_DELAY; + now.tv_nsec = 0; + ns = timespec64_to_ns(&now); + + /* I40E_PRTTSYN_TGT_L(1) */ + wr32(hw, I40E_PRTTSYN_TGT_L(1), ns & 0xFFFFFFFF); + /* I40E_PRTTSYN_TGT_H(1) */ + wr32(hw, I40E_PRTTSYN_TGT_H(1), ns >> 32); + wr32(hw, I40E_PRTTSYN_CLKO(1), I40E_PTP_HALF_SECOND); + wr32(hw, I40E_PRTTSYN_AUX_1(1), I40E_PRTTSYN_AUX_1_INSTNT); + wr32(hw, I40E_PRTTSYN_AUX_0(1), + I40E_PRTTSYN_AUX_0_OUT_ENABLE_CLK_MOD); +} + +/** * i40e_ptp_adjtime - Adjust the PHC time * @ptp: The PTP clock structure * @delta: Offset in nanoseconds to adjust the PHC time by @@ -146,14 +420,35 @@ static int i40e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); - struct timespec64 now, then; + struct i40e_hw *hw = &pf->hw; - then = ns_to_timespec64(delta); mutex_lock(&pf->tmreg_lock); - i40e_ptp_read(pf, &now, NULL); - now = timespec64_add(now, then); - i40e_ptp_write(pf, (const struct timespec64 *)&now); + if (delta > -999999900LL && delta < 999999900LL) { + int 
neg_adj = 0; + u32 timadj; + u64 tohw; + + if (delta < 0) { + neg_adj = 1; + tohw = -delta; + } else { + tohw = delta; + } + + timadj = tohw & 0x3FFFFFFF; + if (neg_adj) + timadj |= I40E_ISGN; + wr32(hw, I40E_PRTTSYN_ADJ, timadj); + } else { + struct timespec64 then, now; + + then = ns_to_timespec64(delta); + i40e_ptp_read(pf, &now, NULL); + now = timespec64_add(now, then); + i40e_ptp_write(pf, (const struct timespec64 *)&now); + i40e_ptp_set_1pps_signal_hw(pf); + } mutex_unlock(&pf->tmreg_lock); @@ -184,7 +479,7 @@ static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, /** * i40e_ptp_settime - Set the time of the PHC * @ptp: The PTP clock structure - * @ts: timespec structure that holds the new time value + * @ts: timespec64 structure that holds the new time value * * Set the device clock to the user input value. The conversion from timespec * to ns happens in the write function. @@ -202,18 +497,145 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp, } /** - * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem + * i40e_pps_configure - configure PPS events + * @ptp: ptp clock + * @rq: clock request + * @on: status + * + * Configure PPS events for external clock source. + * Return 0 on success or error on failure. + **/ +static int i40e_pps_configure(struct ptp_clock_info *ptp, + struct ptp_clock_request *rq, + int on) +{ + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + + if (!!on) + i40e_ptp_set_1pps_signal_hw(pf); + + return 0; +} + +/** + * i40e_pin_state - determine PIN state + * @index: PIN index + * @func: function assigned to PIN + * + * Determine PIN state based on PIN index and function assigned. + * Return PIN state. + **/ +static enum i40e_ptp_gpio_pin_state i40e_pin_state(int index, int func) +{ + enum i40e_ptp_gpio_pin_state state = off; + + if (index == 0 && func == PTP_PF_EXTTS) + state = in_A; + if (index == 1 && func == PTP_PF_EXTTS) + state = in_B; + if (index == 0 && func == PTP_PF_PEROUT) + state = out_A; + if (index == 1 && func == PTP_PF_PEROUT) + state = out_B; + + return state; +} + +/** + * i40e_ptp_enable_pin - enable PINs. + * @pf: private board structure + * @chan: channel + * @func: PIN function + * @on: state + * + * Enable PTP pins for external clock source. + * Return 0 on success or error code on failure. + **/ +static int i40e_ptp_enable_pin(struct i40e_pf *pf, unsigned int chan, + enum ptp_pin_function func, int on) +{ + enum i40e_ptp_gpio_pin_state *pin = NULL; + struct i40e_ptp_pins_settings pins; + int pin_index; + + /* Use PF0 to set pins. Return success for user space tools */ + if (pf->hw.pf_id) + return 0; + + /* Preserve previous state of pins that we don't touch */ + pins.sdp3_2 = pf->ptp_pins->sdp3_2; + pins.sdp3_3 = pf->ptp_pins->sdp3_3; + pins.gpio_4 = pf->ptp_pins->gpio_4; + + /* To turn on the pin - find the corresponding one based on + * the given index. To to turn the function off - find + * which pin had it assigned. Don't use ptp_find_pin here + * because it tries to lock the pincfg_mux which is locked by + * ptp_pin_store() that calls here. 
+ */ + if (on) { + pin_index = ptp_find_pin(pf->ptp_clock, func, chan); + if (pin_index < 0) + return -EBUSY; + + switch (pin_index) { + case SDP3_2: + pin = &pins.sdp3_2; + break; + case SDP3_3: + pin = &pins.sdp3_3; + break; + case GPIO_4: + pin = &pins.gpio_4; + break; + default: + return -EINVAL; + } + + *pin = i40e_pin_state(chan, func); + } else { + pins.sdp3_2 = off; + pins.sdp3_3 = off; + pins.gpio_4 = off; + } + + return i40e_ptp_set_pins(pf, &pins) ? -EINVAL : 0; +} + +/** + * i40e_ptp_feature_enable - Enable external clock pins * @ptp: The PTP clock structure - * @rq: The requested feature to change - * @on: Enable/disable flag + * @rq: The PTP clock request structure + * @on: To turn feature on/off * - * The XL710 does not support any of the ancillary features of the PHC - * subsystem, so this function may just return. + * Setting on/off PTP PPS feature for pin. **/ static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp, - struct ptp_clock_request *rq, int on) + struct ptp_clock_request *rq, + int on) { - return -EOPNOTSUPP; + struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); + + enum ptp_pin_function func; + unsigned int chan; + + /* TODO: Implement flags handling for EXTTS and PEROUT */ + switch (rq->type) { + case PTP_CLK_REQ_EXTTS: + func = PTP_PF_EXTTS; + chan = rq->extts.index; + break; + case PTP_CLK_REQ_PEROUT: + func = PTP_PF_PEROUT; + chan = rq->perout.index; + break; + case PTP_CLK_REQ_PPS: + return i40e_pps_configure(ptp, rq, on); + default: + return -EOPNOTSUPP; + } + + return i40e_ptp_enable_pin(pf, chan, func, on); } /** @@ -528,6 +950,229 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr) } /** + * i40e_ptp_free_pins - free memory used by PTP pins + * @pf: Board private structure + * + * Release memory allocated for PTP pins. + **/ +static void i40e_ptp_free_pins(struct i40e_pf *pf) +{ + if (i40e_is_ptp_pin_dev(&pf->hw)) { + kfree(pf->ptp_pins); + kfree(pf->ptp_caps.pin_config); + pf->ptp_pins = NULL; + } +} + +/** + * i40e_ptp_set_pin_hw - Set HW GPIO pin + * @hw: pointer to the hardware structure + * @pin: pin index + * @state: pin state + * + * Set status of GPIO pin for external clock handling. + **/ +static void i40e_ptp_set_pin_hw(struct i40e_hw *hw, + unsigned int pin, + enum i40e_ptp_gpio_pin_state state) +{ + switch (state) { + case off: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), 0); + break; + case in_A: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_0_IN_TIMESYNC_0); + break; + case in_B: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_1_IN_TIMESYNC_0); + break; + case out_A: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_0_OUT_TIMESYNC_1); + break; + case out_B: + wr32(hw, I40E_GLGEN_GPIO_CTL(pin), + I40E_GLGEN_GPIO_CTL_PORT_1_OUT_TIMESYNC_1); + break; + default: + break; + } +} + +/** + * i40e_ptp_set_led_hw - Set HW GPIO led + * @hw: pointer to the hardware structure + * @led: led index + * @state: led state + * + * Set status of GPIO led for external clock handling. 
+ **/ +static void i40e_ptp_set_led_hw(struct i40e_hw *hw, + unsigned int led, + enum i40e_ptp_led_pin_state state) +{ + switch (state) { + case low: + wr32(hw, I40E_GLGEN_GPIO_SET, + I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | led); + break; + case high: + wr32(hw, I40E_GLGEN_GPIO_SET, + I40E_GLGEN_GPIO_SET_DRV_SDP_DATA | + I40E_GLGEN_GPIO_SET_SDP_DATA_HI | led); + break; + default: + break; + } +} + +/** + * i40e_ptp_init_leds_hw - init LEDs + * @hw: pointer to a hardware structure + * + * Set initial state of LEDs + **/ +static void i40e_ptp_init_leds_hw(struct i40e_hw *hw) +{ + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_0), + I40E_GLGEN_GPIO_CTL_LED_INIT); + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED2_1), + I40E_GLGEN_GPIO_CTL_LED_INIT); + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_0), + I40E_GLGEN_GPIO_CTL_LED_INIT); + wr32(hw, I40E_GLGEN_GPIO_CTL(I40E_LED3_1), + I40E_GLGEN_GPIO_CTL_LED_INIT); +} + +/** + * i40e_ptp_set_pins_hw - Set HW GPIO pins + * @pf: Board private structure + * + * This function sets GPIO pins for PTP + **/ +static void i40e_ptp_set_pins_hw(struct i40e_pf *pf) +{ + const struct i40e_ptp_pins_settings *pins = pf->ptp_pins; + struct i40e_hw *hw = &pf->hw; + + /* pin must be disabled before it may be used */ + i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off); + i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off); + i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off); + + i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, pins->sdp3_2); + i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, pins->sdp3_3); + i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, pins->gpio_4); + + i40e_ptp_set_led_hw(hw, I40E_LED2_0, pins->led2_0); + i40e_ptp_set_led_hw(hw, I40E_LED2_1, pins->led2_1); + i40e_ptp_set_led_hw(hw, I40E_LED3_0, pins->led3_0); + i40e_ptp_set_led_hw(hw, I40E_LED3_1, pins->led3_1); + + dev_info(&pf->pdev->dev, + "PTP configuration set to: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n", + i40e_ptp_gpio_pin_state2str[pins->sdp3_2], + i40e_ptp_gpio_pin_state2str[pins->sdp3_3], + i40e_ptp_gpio_pin_state2str[pins->gpio_4]); +} + +/** + * i40e_ptp_set_pins - set PTP pins in HW + * @pf: Board private structure + * @pins: PTP pins to be applied + * + * Validate and set PTP pins in HW for specific PF. + * Return 0 on success or negative value on error. 
+ **/ +static int i40e_ptp_set_pins(struct i40e_pf *pf, + struct i40e_ptp_pins_settings *pins) +{ + enum i40e_can_set_pins_t pin_caps = i40e_can_set_pins(pf); + int i = 0; + + if (pin_caps == CANT_DO_PINS) + return -EOPNOTSUPP; + else if (pin_caps == CAN_DO_PINS) + return 0; + + if (pins->sdp3_2 == invalid) + pins->sdp3_2 = pf->ptp_pins->sdp3_2; + if (pins->sdp3_3 == invalid) + pins->sdp3_3 = pf->ptp_pins->sdp3_3; + if (pins->gpio_4 == invalid) + pins->gpio_4 = pf->ptp_pins->gpio_4; + while (i40e_ptp_pin_led_allowed_states[i].sdp3_2 != end) { + if (pins->sdp3_2 == i40e_ptp_pin_led_allowed_states[i].sdp3_2 && + pins->sdp3_3 == i40e_ptp_pin_led_allowed_states[i].sdp3_3 && + pins->gpio_4 == i40e_ptp_pin_led_allowed_states[i].gpio_4) { + pins->led2_0 = + i40e_ptp_pin_led_allowed_states[i].led2_0; + pins->led2_1 = + i40e_ptp_pin_led_allowed_states[i].led2_1; + pins->led3_0 = + i40e_ptp_pin_led_allowed_states[i].led3_0; + pins->led3_1 = + i40e_ptp_pin_led_allowed_states[i].led3_1; + break; + } + i++; + } + if (i40e_ptp_pin_led_allowed_states[i].sdp3_2 == end) { + dev_warn(&pf->pdev->dev, + "Unsupported PTP pin configuration: SDP3_2: %s, SDP3_3: %s, GPIO_4: %s.\n", + i40e_ptp_gpio_pin_state2str[pins->sdp3_2], + i40e_ptp_gpio_pin_state2str[pins->sdp3_3], + i40e_ptp_gpio_pin_state2str[pins->gpio_4]); + + return -EPERM; + } + memcpy(pf->ptp_pins, pins, sizeof(*pins)); + i40e_ptp_set_pins_hw(pf); + i40_ptp_reset_timing_events(pf); + + return 0; +} + +/** + * i40e_ptp_alloc_pins - allocate PTP pins structure + * @pf: Board private structure + * + * allocate PTP pins structure + **/ +int i40e_ptp_alloc_pins(struct i40e_pf *pf) +{ + if (!i40e_is_ptp_pin_dev(&pf->hw)) + return 0; + + pf->ptp_pins = + kzalloc(sizeof(struct i40e_ptp_pins_settings), GFP_KERNEL); + + if (!pf->ptp_pins) { + dev_warn(&pf->pdev->dev, "Cannot allocate memory for PTP pins structure.\n"); + return -I40E_ERR_NO_MEMORY; + } + + pf->ptp_pins->sdp3_2 = off; + pf->ptp_pins->sdp3_3 = off; + pf->ptp_pins->gpio_4 = off; + pf->ptp_pins->led2_0 = high; + pf->ptp_pins->led2_1 = high; + pf->ptp_pins->led3_0 = high; + pf->ptp_pins->led3_1 = high; + + /* Use PF0 to set pins in HW. Return success for user space tools */ + if (pf->hw.pf_id) + return 0; + + i40e_ptp_init_leds_hw(&pf->hw); + i40e_ptp_set_pins_hw(pf); + + return 0; +} + +/** * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode * @pf: Board private structure * @config: hwtstamp settings requested or saved @@ -545,6 +1190,21 @@ static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf, struct i40e_hw *hw = &pf->hw; u32 tsyntype, regval; + /* Selects external trigger to cause event */ + regval = rd32(hw, I40E_PRTTSYN_AUX_0(0)); + /* Bit 17:16 is EVNTLVL, 01B rising edge */ + regval &= 0; + regval |= (1 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT); + /* regval: 0001 0000 0000 0000 0000 */ + wr32(hw, I40E_PRTTSYN_AUX_0(0), regval); + + /* Enabel interrupts */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval |= 1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + INIT_WORK(&pf->ptp_extts0_work, i40e_ptp_extts0_work); + /* Reserved for future extensions. */ if (config->flags) return -EINVAL; @@ -688,6 +1348,45 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr) } /** + * i40e_init_pin_config - initialize pins. + * @pf: private board structure + * + * Initialize pins for external clock source. + * Return 0 on success or error code on failure. 
+ **/ +static int i40e_init_pin_config(struct i40e_pf *pf) +{ + int i; + + pf->ptp_caps.n_pins = 3; + pf->ptp_caps.n_ext_ts = 2; + pf->ptp_caps.pps = 1; + pf->ptp_caps.n_per_out = 2; + + pf->ptp_caps.pin_config = kcalloc(pf->ptp_caps.n_pins, + sizeof(*pf->ptp_caps.pin_config), + GFP_KERNEL); + if (!pf->ptp_caps.pin_config) + return -ENOMEM; + + for (i = 0; i < pf->ptp_caps.n_pins; i++) { + snprintf(pf->ptp_caps.pin_config[i].name, + sizeof(pf->ptp_caps.pin_config[i].name), + "%s", sdp_desc[i].name); + pf->ptp_caps.pin_config[i].index = sdp_desc[i].index; + pf->ptp_caps.pin_config[i].func = PTP_PF_NONE; + pf->ptp_caps.pin_config[i].chan = sdp_desc[i].chan; + } + + pf->ptp_caps.verify = i40e_ptp_verify; + pf->ptp_caps.enable = i40e_ptp_feature_enable; + + pf->ptp_caps.pps = 1; + + return 0; +} + +/** * i40e_ptp_create_clock - Create PTP clock device for userspace * @pf: Board private structure * @@ -707,13 +1406,16 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; - pf->ptp_caps.n_ext_ts = 0; - pf->ptp_caps.pps = 0; pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; pf->ptp_caps.adjtime = i40e_ptp_adjtime; pf->ptp_caps.gettimex64 = i40e_ptp_gettimex; pf->ptp_caps.settime64 = i40e_ptp_settime; - pf->ptp_caps.enable = i40e_ptp_feature_enable; + if (i40e_is_ptp_pin_dev(&pf->hw)) { + int err = i40e_init_pin_config(pf); + + if (err) + return err; + } /* Attempt to register the clock before enabling the hardware. */ pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev); @@ -843,6 +1545,8 @@ void i40e_ptp_init(struct i40e_pf *pf) /* Restore the clock time based on last known value */ i40e_ptp_restore_hw_time(pf); } + + i40e_ptp_set_1pps_signal_hw(pf); } /** @@ -854,6 +1558,9 @@ void i40e_ptp_init(struct i40e_pf *pf) **/ void i40e_ptp_stop(struct i40e_pf *pf) { + struct i40e_hw *hw = &pf->hw; + u32 regval; + pf->flags &= ~I40E_FLAG_PTP; pf->ptp_tx = false; pf->ptp_rx = false; @@ -872,4 +1579,21 @@ void i40e_ptp_stop(struct i40e_pf *pf) dev_info(&pf->pdev->dev, "%s: removed PHC on %s\n", __func__, pf->vsi[pf->lan_vsi]->netdev->name); } + + if (i40e_is_ptp_pin_dev(&pf->hw)) { + i40e_ptp_set_pin_hw(hw, I40E_SDP3_2, off); + i40e_ptp_set_pin_hw(hw, I40E_SDP3_3, off); + i40e_ptp_set_pin_hw(hw, I40E_GPIO_4, off); + } + + regval = rd32(hw, I40E_PRTTSYN_AUX_0(0)); + regval &= ~I40E_PRTTSYN_AUX_0_PTPFLAG_MASK; + wr32(hw, I40E_PRTTSYN_AUX_0(0), regval); + + /* Disable interrupts */ + regval = rd32(hw, I40E_PRTTSYN_CTL0); + regval &= ~I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK; + wr32(hw, I40E_PRTTSYN_CTL0, regval); + + i40e_ptp_free_pins(pf); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_register.h b/drivers/net/ethernet/intel/i40e/i40e_register.h index 36f7b27a04ae..8d0588a27a05 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_register.h +++ b/drivers/net/ethernet/intel/i40e/i40e_register.h @@ -182,11 +182,20 @@ #define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT) #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4 +#define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5 +#define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT) #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12 
#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT) +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19 +#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT) +#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20 +#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */ +#define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5 +#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6 #define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */ #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0 @@ -540,6 +549,7 @@ #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12 #define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */ #define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */ +#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */ #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT) #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31 @@ -742,6 +752,8 @@ #define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */ #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT) +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2 +#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT) #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8 #define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT) #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31 @@ -760,7 +772,10 @@ #define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */ #define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ #define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */ +#define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3 #define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */ +#define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0 +#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT) #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4 #define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT) #define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */ @@ -768,6 +783,20 @@ #define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */ #define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0 +#define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1 +#define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16 +#define I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT 17 +#define I40E_PRTTSYN_AUX_0_PTPFLAG_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_PTPFLAG_SHIFT) +#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0 +#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */ +#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */ 
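As a usage illustration for the new i40e SDP pin / 1PPS support added above (not part of the patch itself), the following minimal user-space sketch exercises the feature through the standard /dev/ptpN character-device ABI from linux/ptp_clock.h. The device node name /dev/ptp0, the choice of pin and channel 0 (SDP3_2 per the sdp_desc[] table referenced in i40e_init_pin_config()), and the rising-edge flag are assumptions; as the TODO in i40e_ptp_feature_enable() notes, EXTTS/PEROUT flags are not yet interpreted by the driver.

/* ptp_extts_demo.c - illustrative sketch only; not part of the patch above. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	struct ptp_pin_desc pin;
	int fd = open("/dev/ptp0", O_RDWR);	/* assumed i40e PHC node */

	if (fd < 0) {
		perror("open /dev/ptp0");
		return 1;
	}

	/* Assign the EXTTS function to pin 0 first; this goes through the
	 * driver's verify/pin-set paths (i40e_ptp_verify, i40e_ptp_set_pins).
	 */
	memset(&pin, 0, sizeof(pin));
	pin.index = 0;
	pin.func = PTP_PF_EXTTS;
	pin.chan = 0;
	if (ioctl(fd, PTP_PIN_SETFUNC, &pin))
		perror("PTP_PIN_SETFUNC");

	/* Request external timestamps; this lands in
	 * i40e_ptp_feature_enable() -> i40e_ptp_enable_pin().
	 */
	memset(&req, 0, sizeof(req));
	req.index = 0;
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;
	if (ioctl(fd, PTP_EXTTS_REQUEST, &req)) {
		perror("PTP_EXTTS_REQUEST");
		close(fd);
		return 1;
	}

	/* Each external pulse is delivered as a struct ptp_extts_event. */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts channel %u: %lld.%09u\n",
		       ev.index, (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}

The same character device can also drive the PEROUT path (PTP_PEROUT_REQUEST) to produce a periodic output on the out_A/out_B pin states defined earlier; the request structure and channel mapping follow the same pattern.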
#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */ #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0 #define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT) diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index eff0a30790dd..472f56b360b8 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1160,12 +1160,12 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) } /** - * i40e_getnum_vf_vsi_vlan_filters + * __i40e_getnum_vf_vsi_vlan_filters * @vsi: pointer to the vsi * * called to get the number of VLANs offloaded on this VF **/ -static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +static int __i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; u16 num_vlans = 0, bkt; @@ -1179,6 +1179,23 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) } /** + * i40e_getnum_vf_vsi_vlan_filters + * @vsi: pointer to the vsi + * + * wrapper for __i40e_getnum_vf_vsi_vlan_filters() with spinlock held + **/ +static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) +{ + int num_vlans; + + spin_lock_bh(&vsi->mac_filter_hash_lock); + num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); + spin_unlock_bh(&vsi->mac_filter_hash_lock); + + return num_vlans; +} + +/** * i40e_get_vlan_list_sync * @vsi: pointer to the VSI * @num_vlans: number of VLANs in mac_filter_hash, returned to caller @@ -1195,7 +1212,7 @@ static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, int bkt; spin_lock_bh(&vsi->mac_filter_hash_lock); - *num_vlans = i40e_getnum_vf_vsi_vlan_filters(vsi); + *num_vlans = __i40e_getnum_vf_vsi_vlan_filters(vsi); *vlan_list = kcalloc(*num_vlans, sizeof(**vlan_list), GFP_ATOMIC); if (!(*vlan_list)) goto err; diff --git a/drivers/net/ethernet/intel/iavf/iavf.h b/drivers/net/ethernet/intel/iavf/iavf.h index 90793b36126e..68c80f04113c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf.h +++ b/drivers/net/ethernet/intel/iavf/iavf.h @@ -186,12 +186,6 @@ enum iavf_state_t { __IAVF_RUNNING, /* opened, working */ }; -enum iavf_critical_section_t { - __IAVF_IN_CRITICAL_TASK, /* cannot be interrupted */ - __IAVF_IN_CLIENT_TASK, - __IAVF_IN_REMOVE_TASK, /* device being removed */ -}; - #define IAVF_CLOUD_FIELD_OMAC 0x01 #define IAVF_CLOUD_FIELD_IMAC 0x02 #define IAVF_CLOUD_FIELD_IVLAN 0x04 @@ -236,6 +230,9 @@ struct iavf_adapter { struct iavf_q_vector *q_vectors; struct list_head vlan_filter_list; struct list_head mac_filter_list; + struct mutex crit_lock; + struct mutex client_lock; + struct mutex remove_lock; /* Lock to protect accesses to MAC and VLAN lists */ spinlock_t mac_vlan_list_lock; char misc_vector_name[IFNAMSIZ + 9]; diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index af43fbd8cb75..5a359a0a20ec 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -685,6 +685,8 @@ static int __iavf_get_coalesce(struct net_device *netdev, * iavf_get_coalesce - Get interrupt coalescing settings * @netdev: network interface device structure * @ec: ethtool coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Returns current coalescing settings. 
This is referred to elsewhere in the * driver as Interrupt Throttle Rate, as this is how the hardware describes @@ -692,7 +694,9 @@ static int __iavf_get_coalesce(struct net_device *netdev, * only represents the settings of queue 0. **/ static int iavf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __iavf_get_coalesce(netdev, ec, -1); } @@ -804,11 +808,15 @@ static int __iavf_set_coalesce(struct net_device *netdev, * iavf_set_coalesce - Set interrupt coalescing settings * @netdev: network interface device structure * @ec: ethtool coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Change current coalescing settings for every queue. **/ static int iavf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __iavf_set_coalesce(netdev, ec, -1); } @@ -1352,8 +1360,7 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (!fltr) return -ENOMEM; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { + while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(fltr); return -EINVAL; @@ -1378,7 +1385,7 @@ ret: if (err && fltr) kfree(fltr); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -1563,8 +1570,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, return -EINVAL; } - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { + while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) { kfree(rss_new); return -EINVAL; @@ -1600,7 +1606,7 @@ iavf_set_adv_rss_hash_opt(struct iavf_adapter *adapter, if (!err) mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); if (!rss_new_add) kfree(rss_new); diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 606a01ce4073..23762a7ef740 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -132,6 +132,27 @@ enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw, } /** + * iavf_lock_timeout - try to lock mutex but give up after timeout + * @lock: mutex that should be locked + * @msecs: timeout in msecs + * + * Returns 0 on success, negative on failure + **/ +static int iavf_lock_timeout(struct mutex *lock, unsigned int msecs) +{ + unsigned int wait, delay = 10; + + for (wait = 0; wait < msecs; wait += delay) { + if (mutex_trylock(lock)) + return 0; + + msleep(delay); + } + + return -1; +} + +/** * iavf_schedule_reset - Set the flags and schedule a reset event * @adapter: board private structure **/ @@ -1916,7 +1937,7 @@ static void iavf_watchdog_task(struct work_struct *work) struct iavf_hw *hw = &adapter->hw; u32 reg_val; - if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section)) + if (!mutex_trylock(&adapter->crit_lock)) goto restart_watchdog; if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) @@ -1934,8 +1955,7 @@ static void iavf_watchdog_task(struct work_struct *work) adapter->state = __IAVF_STARTUP; adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED; queue_delayed_work(iavf_wq, &adapter->init_task, 10); - 
clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); /* Don't reschedule the watchdog, since we've restarted * the init task. When init_task contacts the PF and * gets everything set up again, it'll restart the @@ -1945,14 +1965,13 @@ static void iavf_watchdog_task(struct work_struct *work) } adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; - clear_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, msecs_to_jiffies(10)); goto watchdog_done; case __IAVF_RESETTING: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2); return; case __IAVF_DOWN: @@ -1975,7 +1994,7 @@ static void iavf_watchdog_task(struct work_struct *work) } break; case __IAVF_REMOVE: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return; default: goto restart_watchdog; @@ -1984,7 +2003,6 @@ static void iavf_watchdog_task(struct work_struct *work) /* check for hw reset */ reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK; if (!reg_val) { - adapter->state = __IAVF_RESETTING; adapter->flags |= IAVF_FLAG_RESET_PENDING; adapter->aq_required = 0; adapter->current_op = VIRTCHNL_OP_UNKNOWN; @@ -1998,7 +2016,7 @@ watchdog_done: if (adapter->state == __IAVF_RUNNING || adapter->state == __IAVF_COMM_FAILED) iavf_detect_recover_hung(&adapter->vsi); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); restart_watchdog: if (adapter->aq_required) queue_delayed_work(iavf_wq, &adapter->watchdog_task, @@ -2062,7 +2080,7 @@ static void iavf_disable_vf(struct iavf_adapter *adapter) memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE); iavf_shutdown_adminq(&adapter->hw); adapter->netdev->flags &= ~IFF_UP; - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); adapter->flags &= ~IAVF_FLAG_RESET_PENDING; adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); @@ -2095,11 +2113,14 @@ static void iavf_reset_task(struct work_struct *work) /* When device is being removed it doesn't make sense to run the reset * task, just return in such a case. */ - if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section)) + if (mutex_is_locked(&adapter->remove_lock)) return; - while (test_and_set_bit(__IAVF_IN_CLIENT_TASK, - &adapter->crit_section)) + if (iavf_lock_timeout(&adapter->crit_lock, 200)) { + schedule_work(&adapter->reset_task); + return; + } + while (!mutex_trylock(&adapter->client_lock)) usleep_range(500, 1000); if (CLIENT_ENABLED(adapter)) { adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN | @@ -2151,7 +2172,7 @@ static void iavf_reset_task(struct work_struct *work) dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n", reg_val); iavf_disable_vf(adapter); - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); return; /* Do not attempt to reinit. It's dead, Jim. 
*/ } @@ -2278,13 +2299,13 @@ continue_reset: adapter->state = __IAVF_DOWN; wake_up(&adapter->down_waitqueue); } - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); return; reset_err: - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n"); iavf_close(netdev); } @@ -2312,6 +2333,8 @@ static void iavf_adminq_task(struct work_struct *work) if (!event.msg_buf) goto out; + if (iavf_lock_timeout(&adapter->crit_lock, 200)) + goto freedom; do { ret = iavf_clean_arq_element(hw, &event, &pending); v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high); @@ -2325,6 +2348,7 @@ static void iavf_adminq_task(struct work_struct *work) if (pending != 0) memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); + mutex_unlock(&adapter->crit_lock); if ((adapter->flags & (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) || @@ -2391,7 +2415,7 @@ static void iavf_client_task(struct work_struct *work) * later. */ - if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section)) + if (!mutex_trylock(&adapter->client_lock)) return; if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) { @@ -2414,7 +2438,7 @@ static void iavf_client_task(struct work_struct *work) adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN; } out: - clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section); + mutex_unlock(&adapter->client_lock); } /** @@ -3017,8 +3041,7 @@ static int iavf_configure_clsflower(struct iavf_adapter *adapter, if (!filter) return -ENOMEM; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) { + while (!mutex_trylock(&adapter->crit_lock)) { if (--count == 0) goto err; udelay(1); @@ -3049,7 +3072,7 @@ err: if (err) kfree(filter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -3196,8 +3219,7 @@ static int iavf_open(struct net_device *netdev) return -EIO; } - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); if (adapter->state != __IAVF_DOWN) { @@ -3232,7 +3254,7 @@ static int iavf_open(struct net_device *netdev) iavf_irq_enable(adapter, true); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return 0; @@ -3244,7 +3266,7 @@ err_setup_rx: err_setup_tx: iavf_free_all_tx_resources(adapter); err_unlock: - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return err; } @@ -3268,8 +3290,7 @@ static int iavf_close(struct net_device *netdev) if (adapter->state <= __IAVF_DOWN_PENDING) return 0; - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); set_bit(__IAVF_VSI_DOWN, adapter->vsi.state); @@ -3280,7 +3301,7 @@ static int iavf_close(struct net_device *netdev) adapter->state = __IAVF_DOWN_PENDING; iavf_free_traffic_irqs(adapter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); /* We explicitly don't free resources here because the hardware is * still active and can DMA into memory. 
Resources are cleared in @@ -3629,6 +3650,10 @@ static void iavf_init_task(struct work_struct *work) init_task.work); struct iavf_hw *hw = &adapter->hw; + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) { + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + return; + } switch (adapter->state) { case __IAVF_STARTUP: if (iavf_startup(adapter) < 0) @@ -3641,14 +3666,14 @@ static void iavf_init_task(struct work_struct *work) case __IAVF_INIT_GET_RESOURCES: if (iavf_init_get_resources(adapter) < 0) goto init_failed; - return; + goto out; default: goto init_failed; } queue_delayed_work(iavf_wq, &adapter->init_task, msecs_to_jiffies(30)); - return; + goto out; init_failed: if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) { dev_err(&adapter->pdev->dev, @@ -3657,9 +3682,11 @@ init_failed: iavf_shutdown_adminq(hw); adapter->state = __IAVF_STARTUP; queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5); - return; + goto out; } queue_delayed_work(iavf_wq, &adapter->init_task, HZ); +out: + mutex_unlock(&adapter->crit_lock); } /** @@ -3676,9 +3703,12 @@ static void iavf_shutdown(struct pci_dev *pdev) if (netif_running(netdev)) iavf_close(netdev); + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); /* Prevent the watchdog from running. */ adapter->state = __IAVF_REMOVE; adapter->aq_required = 0; + mutex_unlock(&adapter->crit_lock); #ifdef CONFIG_PM pci_save_state(pdev); @@ -3772,6 +3802,9 @@ static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) /* set up the locks for the AQ, do this only once in probe * and destroy them only once in remove */ + mutex_init(&adapter->crit_lock); + mutex_init(&adapter->client_lock); + mutex_init(&adapter->remove_lock); mutex_init(&hw->aq.asq_mutex); mutex_init(&hw->aq.arq_mutex); @@ -3823,8 +3856,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) netif_device_detach(netdev); - while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, - &adapter->crit_section)) + while (!mutex_trylock(&adapter->crit_lock)) usleep_range(500, 1000); if (netif_running(netdev)) { @@ -3835,7 +3867,7 @@ static int __maybe_unused iavf_suspend(struct device *dev_d) iavf_free_misc_irq(adapter); iavf_reset_interrupt_capability(adapter); - clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section); + mutex_unlock(&adapter->crit_lock); return 0; } @@ -3897,7 +3929,7 @@ static void iavf_remove(struct pci_dev *pdev) struct iavf_hw *hw = &adapter->hw; int err; /* Indicate we are in remove and not to run reset_task */ - set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section); + mutex_lock(&adapter->remove_lock); cancel_delayed_work_sync(&adapter->init_task); cancel_work_sync(&adapter->reset_task); cancel_delayed_work_sync(&adapter->client_task); @@ -3912,10 +3944,6 @@ static void iavf_remove(struct pci_dev *pdev) err); } - /* Shut down all the garbage mashers on the detention level */ - adapter->state = __IAVF_REMOVE; - adapter->aq_required = 0; - adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_request_reset(adapter); msleep(50); /* If the FW isn't responding, kick it once, but only once. 
*/ @@ -3923,6 +3951,13 @@ static void iavf_remove(struct pci_dev *pdev) iavf_request_reset(adapter); msleep(50); } + if (iavf_lock_timeout(&adapter->crit_lock, 5000)) + dev_warn(&adapter->pdev->dev, "failed to acquire crit_lock in %s\n", __FUNCTION__); + + /* Shut down all the garbage mashers on the detention level */ + adapter->state = __IAVF_REMOVE; + adapter->aq_required = 0; + adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED; iavf_free_all_tx_resources(adapter); iavf_free_all_rx_resources(adapter); iavf_misc_irq_disable(adapter); @@ -3942,6 +3977,11 @@ static void iavf_remove(struct pci_dev *pdev) /* destroy the locks only once, here */ mutex_destroy(&hw->aq.arq_mutex); mutex_destroy(&hw->aq.asq_mutex); + mutex_destroy(&adapter->client_lock); + mutex_unlock(&adapter->crit_lock); + mutex_destroy(&adapter->crit_lock); + mutex_unlock(&adapter->remove_lock); + mutex_destroy(&adapter->remove_lock); iounmap(hw->hw_addr); pci_release_regions(pdev); diff --git a/drivers/net/ethernet/intel/ice/ice_devlink.c b/drivers/net/ethernet/intel/ice/ice_devlink.c index 91b545ab8b8f..14afce82ef63 100644 --- a/drivers/net/ethernet/intel/ice/ice_devlink.c +++ b/drivers/net/ethernet/intel/ice/ice_devlink.c @@ -42,7 +42,9 @@ static int ice_info_pba(struct ice_pf *pf, struct ice_info_ctx *ctx) status = ice_read_pba_string(hw, (u8 *)ctx->buf, sizeof(ctx->buf)); if (status) - return -EIO; + /* We failed to locate the PBA, so just skip this entry */ + dev_dbg(ice_pf_to_dev(pf), "Failed to read Product Board Assembly string, status %s\n", + ice_stat_str(status)); return 0; } @@ -475,7 +477,7 @@ struct ice_pf *ice_allocate_pf(struct device *dev) { struct devlink *devlink; - devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf)); + devlink = devlink_alloc(&ice_devlink_ops, sizeof(struct ice_pf), dev); if (!devlink) return NULL; @@ -502,7 +504,7 @@ int ice_devlink_register(struct ice_pf *pf) struct device *dev = ice_pf_to_dev(pf); int err; - err = devlink_register(devlink, dev); + err = devlink_register(devlink); if (err) { dev_err(dev, "devlink registration failed: %d\n", err); return err; diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index d95a5daca114..c451cf401e63 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -3568,8 +3568,10 @@ __ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec, return 0; } -static int -ice_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +static int ice_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __ice_get_coalesce(netdev, ec, -1); } @@ -3787,8 +3789,10 @@ set_complete: return 0; } -static int -ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec) +static int ice_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __ice_set_coalesce(netdev, ec, -1); } diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index fe2ded775f25..0d6c143f6653 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -5122,6 +5122,7 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) struct ice_hw *hw = &pf->hw; struct sockaddr *addr = pi; enum ice_status status; + u8 old_mac[ETH_ALEN]; u8 flags 
= 0; int err = 0; u8 *mac; @@ -5144,8 +5145,13 @@ static int ice_set_mac_address(struct net_device *netdev, void *pi) } netif_addr_lock_bh(netdev); + ether_addr_copy(old_mac, netdev->dev_addr); + /* change the netdev's MAC address */ + memcpy(netdev->dev_addr, mac, netdev->addr_len); + netif_addr_unlock_bh(netdev); + /* Clean up old MAC filter. Not an error if old filter doesn't exist */ - status = ice_fltr_remove_mac(vsi, netdev->dev_addr, ICE_FWD_TO_VSI); + status = ice_fltr_remove_mac(vsi, old_mac, ICE_FWD_TO_VSI); if (status && status != ICE_ERR_DOES_NOT_EXIST) { err = -EADDRNOTAVAIL; goto err_update_filters; @@ -5168,13 +5174,12 @@ err_update_filters: if (err) { netdev_err(netdev, "can't set MAC %pM. filter update failed\n", mac); + netif_addr_lock_bh(netdev); + ether_addr_copy(netdev->dev_addr, old_mac); netif_addr_unlock_bh(netdev); return err; } - /* change the netdev's MAC address */ - memcpy(netdev->dev_addr, mac, netdev->addr_len); - netif_addr_unlock_bh(netdev); netdev_dbg(vsi->netdev, "updated MAC address to %pM\n", netdev->dev_addr); @@ -6570,12 +6575,12 @@ event_after: } /** - * ice_do_ioctl - Access the hwtstamp interface + * ice_eth_ioctl - Access the hwtstamp interface * @netdev: network interface device structure * @ifr: interface request data * @cmd: ioctl command */ -static int ice_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int ice_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_pf *pf = np->vsi->back; @@ -7241,7 +7246,7 @@ static const struct net_device_ops ice_netdev_ops = { .ndo_change_mtu = ice_change_mtu, .ndo_get_stats64 = ice_get_stats64, .ndo_set_tx_maxrate = ice_set_tx_maxrate, - .ndo_do_ioctl = ice_do_ioctl, + .ndo_eth_ioctl = ice_eth_ioctl, .ndo_set_vf_spoofchk = ice_set_vf_spoofchk, .ndo_set_vf_mac = ice_set_vf_mac, .ndo_get_vf_config = ice_get_vf_cfg, diff --git a/drivers/net/ethernet/intel/ice/ice_ptp.c b/drivers/net/ethernet/intel/ice/ice_ptp.c index 9e3ddb9b8b51..05cc5870e4ef 100644 --- a/drivers/net/ethernet/intel/ice/ice_ptp.c +++ b/drivers/net/ethernet/intel/ice/ice_ptp.c @@ -22,7 +22,7 @@ static void ice_set_tx_tstamp(struct ice_pf *pf, bool on) return; /* Set the timestamp enable flag for all the Tx rings */ - ice_for_each_rxq(vsi, i) { + ice_for_each_txq(vsi, i) { if (!vsi->tx_rings[i]) continue; vsi->tx_rings[i]->ptp_tx = on; @@ -689,6 +689,41 @@ err: } /** + * ice_ptp_disable_all_clkout - Disable all currently configured outputs + * @pf: pointer to the PF structure + * + * Disable all currently configured clock outputs. This is necessary before + * certain changes to the PTP hardware clock. Use ice_ptp_enable_all_clkout to + * re-enable the clocks again. + */ +static void ice_ptp_disable_all_clkout(struct ice_pf *pf) +{ + uint i; + + for (i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_channels[i].ena) + ice_ptp_cfg_clkout(pf, i, NULL, false); +} + +/** + * ice_ptp_enable_all_clkout - Enable all configured periodic clock outputs + * @pf: pointer to the PF structure + * + * Enable all currently configured clock outputs. Use this after + * ice_ptp_disable_all_clkout to reconfigure the output signals according to + * their configuration. 
+ */ +static void ice_ptp_enable_all_clkout(struct ice_pf *pf) +{ + uint i; + + for (i = 0; i < pf->ptp.info.n_per_out; i++) + if (pf->ptp.perout_channels[i].ena) + ice_ptp_cfg_clkout(pf, i, &pf->ptp.perout_channels[i], + false); +} + +/** * ice_ptp_gpio_enable_e810 - Enable/disable ancillary features of PHC * @info: the driver's PTP info structure * @rq: The requested feature to change @@ -783,12 +818,17 @@ ice_ptp_settime64(struct ptp_clock_info *info, const struct timespec64 *ts) goto exit; } + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + err = ice_ptp_write_init(pf, &ts64); ice_ptp_unlock(hw); if (!err) ice_ptp_update_cached_phctime(pf); + /* Reenable periodic outputs */ + ice_ptp_enable_all_clkout(pf); exit: if (err) { dev_err(ice_pf_to_dev(pf), "PTP failed to set time %d\n", err); @@ -842,8 +882,14 @@ static int ice_ptp_adjtime(struct ptp_clock_info *info, s64 delta) return -EBUSY; } + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + err = ice_ptp_write_adj(pf, delta); + /* Reenable periodic outputs */ + ice_ptp_enable_all_clkout(pf); + ice_ptp_unlock(hw); if (err) { @@ -1064,17 +1110,6 @@ static long ice_ptp_create_clock(struct ice_pf *pf) info = &pf->ptp.info; dev = ice_pf_to_dev(pf); - /* Allocate memory for kernel pins interface */ - if (info->n_pins) { - info->pin_config = devm_kcalloc(dev, info->n_pins, - sizeof(*info->pin_config), - GFP_KERNEL); - if (!info->pin_config) { - info->n_pins = 0; - return -ENOMEM; - } - } - /* Attempt to register the clock before enabling the hardware. */ clock = ptp_clock_register(info, dev); if (IS_ERR(clock)) @@ -1278,6 +1313,8 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) { u8 idx; + spin_lock(&tx->lock); + for (idx = 0; idx < tx->len; idx++) { u8 phy_idx = idx + tx->quad_offset; @@ -1290,6 +1327,8 @@ ice_ptp_flush_tx_tracker(struct ice_pf *pf, struct ice_ptp_tx *tx) tx->tstamps[idx].skb = NULL; } } + + spin_unlock(&tx->lock); } /** @@ -1550,6 +1589,9 @@ void ice_ptp_release(struct ice_pf *pf) if (!pf->ptp.clock) return; + /* Disable periodic outputs */ + ice_ptp_disable_all_clkout(pf); + ice_clear_ptp_clock_index(pf); ptp_clock_unregister(pf->ptp.clock); pf->ptp.clock = NULL; diff --git a/drivers/net/ethernet/intel/igb/e1000_mac.c b/drivers/net/ethernet/intel/igb/e1000_mac.c index e63ee3cca5ea..1277c5c7d099 100644 --- a/drivers/net/ethernet/intel/igb/e1000_mac.c +++ b/drivers/net/ethernet/intel/igb/e1000_mac.c @@ -492,6 +492,7 @@ static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr) **/ static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) { + int failed_cnt = 3; bool is_failed; int i; @@ -502,9 +503,12 @@ static void igb_i21x_hw_doublecheck(struct e1000_hw *hw) is_failed = true; array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]); wrfl(); - break; } } + if (is_failed && --failed_cnt <= 0) { + hw_dbg("Failed to update MTA_REGISTER, too many retries"); + break; + } } while (is_failed); } diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 636a1b1fb7e1..fb1029352c3e 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2182,7 +2182,9 @@ static int igb_set_phys_id(struct net_device *netdev, } static int igb_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); int i; @@ -2238,7 
+2240,9 @@ static int igb_set_coalesce(struct net_device *netdev, } static int igb_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igb_adapter *adapter = netdev_priv(netdev); @@ -2343,8 +2347,7 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_TEST: - memcpy(data, *igb_gstrings_test, - IGB_TEST_LEN*ETH_GSTRING_LEN); + memcpy(data, igb_gstrings_test, sizeof(igb_gstrings_test)); break; case ETH_SS_STATS: for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 171a7a629b20..751de06019a0 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -2991,7 +2991,7 @@ static const struct net_device_ops igb_netdev_ops = { .ndo_set_rx_mode = igb_set_rx_mode, .ndo_set_mac_address = igb_set_mac, .ndo_change_mtu = igb_change_mtu, - .ndo_do_ioctl = igb_ioctl, + .ndo_eth_ioctl = igb_ioctl, .ndo_tx_timeout = igb_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = igb_vlan_rx_add_vid, diff --git a/drivers/net/ethernet/intel/igbvf/ethtool.c b/drivers/net/ethernet/intel/igbvf/ethtool.c index f4835eb62fee..06e5bd646a0e 100644 --- a/drivers/net/ethernet/intel/igbvf/ethtool.c +++ b/drivers/net/ethernet/intel/igbvf/ethtool.c @@ -314,7 +314,9 @@ static int igbvf_set_wol(struct net_device *netdev, } static int igbvf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); @@ -327,7 +329,9 @@ static int igbvf_get_coalesce(struct net_device *netdev, } static int igbvf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igbvf_adapter *adapter = netdev_priv(netdev); struct e1000_hw *hw = &adapter->hw; diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 1bbe9862a758..d32e72d953c8 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -2657,7 +2657,7 @@ static const struct net_device_ops igbvf_netdev_ops = { .ndo_set_rx_mode = igbvf_set_rx_mode, .ndo_set_mac_address = igbvf_set_mac, .ndo_change_mtu = igbvf_change_mtu, - .ndo_do_ioctl = igbvf_ioctl, + .ndo_eth_ioctl = igbvf_ioctl, .ndo_tx_timeout = igbvf_tx_timeout, .ndo_vlan_rx_add_vid = igbvf_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = igbvf_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 5901ed9fb545..3e386c38d016 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -33,6 +33,8 @@ void igc_ethtool_set_ops(struct net_device *); #define IGC_N_PEROUT 2 #define IGC_N_SDP 4 +#define MAX_FLEX_FILTER 32 + enum igc_mac_filter_type { IGC_MAC_FILTER_TYPE_DST = 0, IGC_MAC_FILTER_TYPE_SRC @@ -96,6 +98,13 @@ struct igc_ring { u32 start_time; u32 end_time; + /* CBS parameters */ + bool cbs_enable; /* indicates if CBS is enabled */ + s32 idleslope; /* idleSlope in kbps */ + s32 sendslope; /* sendSlope in kbps */ + s32 hicredit; /* hiCredit in bytes */ + s32 locredit; /* loCredit in bytes */ + /* 
everything past this point are written often */ u16 next_to_clean; u16 next_to_use; @@ -225,6 +234,7 @@ struct igc_adapter { struct timecounter tc; struct timespec64 prev_ptp_time; /* Pre-reset PTP clock */ ktime_t ptp_reset_start; /* Reset time in clock mono */ + struct system_time_snapshot snapshot; char fw_version[32]; @@ -287,6 +297,10 @@ extern char igc_driver_name[]; #define IGC_FLAG_VLAN_PROMISC BIT(15) #define IGC_FLAG_RX_LEGACY BIT(16) #define IGC_FLAG_TSN_QBV_ENABLED BIT(17) +#define IGC_FLAG_TSN_QAV_ENABLED BIT(18) + +#define IGC_FLAG_TSN_ANY_ENABLED \ + (IGC_FLAG_TSN_QBV_ENABLED | IGC_FLAG_TSN_QAV_ENABLED) #define IGC_FLAG_RSS_FIELD_IPV4_UDP BIT(6) #define IGC_FLAG_RSS_FIELD_IPV6_UDP BIT(7) @@ -476,18 +490,28 @@ struct igc_q_vector { }; enum igc_filter_match_flags { - IGC_FILTER_FLAG_ETHER_TYPE = 0x1, - IGC_FILTER_FLAG_VLAN_TCI = 0x2, - IGC_FILTER_FLAG_SRC_MAC_ADDR = 0x4, - IGC_FILTER_FLAG_DST_MAC_ADDR = 0x8, + IGC_FILTER_FLAG_ETHER_TYPE = BIT(0), + IGC_FILTER_FLAG_VLAN_TCI = BIT(1), + IGC_FILTER_FLAG_SRC_MAC_ADDR = BIT(2), + IGC_FILTER_FLAG_DST_MAC_ADDR = BIT(3), + IGC_FILTER_FLAG_USER_DATA = BIT(4), + IGC_FILTER_FLAG_VLAN_ETYPE = BIT(5), }; struct igc_nfc_filter { u8 match_flags; u16 etype; + __be16 vlan_etype; u16 vlan_tci; u8 src_addr[ETH_ALEN]; u8 dst_addr[ETH_ALEN]; + u8 user_data[8]; + u8 user_mask[8]; + u8 flex_index; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; }; struct igc_nfc_rule { @@ -495,12 +519,24 @@ struct igc_nfc_rule { struct igc_nfc_filter filter; u32 location; u16 action; + bool flex; }; -/* IGC supports a total of 32 NFC rules: 16 MAC address based,, 8 VLAN priority - * based, and 8 ethertype based. +/* IGC supports a total of 32 NFC rules: 16 MAC address based, 8 VLAN priority + * based, 8 ethertype based and 32 Flex filter based rules. 
*/ -#define IGC_MAX_RXNFC_RULES 32 +#define IGC_MAX_RXNFC_RULES 64 + +struct igc_flex_filter { + u8 index; + u8 data[128]; + u8 mask[16]; + u8 length; + u8 rx_queue; + u8 prio; + u8 immediate_irq; + u8 drop; +}; /* igc_desc_unused - calculate if we have unused descriptors */ static inline u16 igc_desc_unused(const struct igc_ring *ring) diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index d0700d48ecf9..84f142f5e472 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -187,15 +187,7 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) igc_check_for_copper_link(hw); - /* Verify phy id and set remaining function pointers */ - switch (phy->id) { - case I225_I_PHY_ID: - phy->type = igc_phy_i225; - break; - default: - ret_val = -IGC_ERR_PHY; - goto out; - } + phy->type = igc_phy_i225; out: return ret_val; diff --git a/drivers/net/ethernet/intel/igc/igc_defines.h b/drivers/net/ethernet/intel/igc/igc_defines.h index c3a5a5518790..a4bbee748798 100644 --- a/drivers/net/ethernet/intel/igc/igc_defines.h +++ b/drivers/net/ethernet/intel/igc/igc_defines.h @@ -17,11 +17,22 @@ #define IGC_WUC_PME_EN 0x00000002 /* PME Enable */ /* Wake Up Filter Control */ -#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ -#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ -#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ -#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ -#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +#define IGC_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +#define IGC_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +#define IGC_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +#define IGC_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +#define IGC_WUFC_FLEX_HQ BIT(14) /* Flex Filters Host Queuing */ +#define IGC_WUFC_FLX0 BIT(16) /* Flexible Filter 0 Enable */ +#define IGC_WUFC_FLX1 BIT(17) /* Flexible Filter 1 Enable */ +#define IGC_WUFC_FLX2 BIT(18) /* Flexible Filter 2 Enable */ +#define IGC_WUFC_FLX3 BIT(19) /* Flexible Filter 3 Enable */ +#define IGC_WUFC_FLX4 BIT(20) /* Flexible Filter 4 Enable */ +#define IGC_WUFC_FLX5 BIT(21) /* Flexible Filter 5 Enable */ +#define IGC_WUFC_FLX6 BIT(22) /* Flexible Filter 6 Enable */ +#define IGC_WUFC_FLX7 BIT(23) /* Flexible Filter 7 Enable */ + +#define IGC_WUFC_FILTER_MASK GENMASK(23, 14) #define IGC_CTRL_ADVD3WUC 0x00100000 /* D3 WUC */ @@ -46,6 +57,37 @@ /* Wake Up Packet Memory stores the first 128 bytes of the wake up packet */ #define IGC_WUPM_BYTES 128 +/* Wakeup Filter Control Extended */ +#define IGC_WUFC_EXT_FLX8 BIT(8) /* Flexible Filter 8 Enable */ +#define IGC_WUFC_EXT_FLX9 BIT(9) /* Flexible Filter 9 Enable */ +#define IGC_WUFC_EXT_FLX10 BIT(10) /* Flexible Filter 10 Enable */ +#define IGC_WUFC_EXT_FLX11 BIT(11) /* Flexible Filter 11 Enable */ +#define IGC_WUFC_EXT_FLX12 BIT(12) /* Flexible Filter 12 Enable */ +#define IGC_WUFC_EXT_FLX13 BIT(13) /* Flexible Filter 13 Enable */ +#define IGC_WUFC_EXT_FLX14 BIT(14) /* Flexible Filter 14 Enable */ +#define IGC_WUFC_EXT_FLX15 BIT(15) /* Flexible Filter 15 Enable */ +#define IGC_WUFC_EXT_FLX16 BIT(16) /* Flexible Filter 16 Enable */ +#define IGC_WUFC_EXT_FLX17 BIT(17) /* Flexible Filter 17 Enable */ +#define IGC_WUFC_EXT_FLX18 BIT(18) /* Flexible Filter 18 Enable */ +#define IGC_WUFC_EXT_FLX19 BIT(19) /* 
Flexible Filter 19 Enable */ +#define IGC_WUFC_EXT_FLX20 BIT(20) /* Flexible Filter 20 Enable */ +#define IGC_WUFC_EXT_FLX21 BIT(21) /* Flexible Filter 21 Enable */ +#define IGC_WUFC_EXT_FLX22 BIT(22) /* Flexible Filter 22 Enable */ +#define IGC_WUFC_EXT_FLX23 BIT(23) /* Flexible Filter 23 Enable */ +#define IGC_WUFC_EXT_FLX24 BIT(24) /* Flexible Filter 24 Enable */ +#define IGC_WUFC_EXT_FLX25 BIT(25) /* Flexible Filter 25 Enable */ +#define IGC_WUFC_EXT_FLX26 BIT(26) /* Flexible Filter 26 Enable */ +#define IGC_WUFC_EXT_FLX27 BIT(27) /* Flexible Filter 27 Enable */ +#define IGC_WUFC_EXT_FLX28 BIT(28) /* Flexible Filter 28 Enable */ +#define IGC_WUFC_EXT_FLX29 BIT(29) /* Flexible Filter 29 Enable */ +#define IGC_WUFC_EXT_FLX30 BIT(30) /* Flexible Filter 30 Enable */ +#define IGC_WUFC_EXT_FLX31 BIT(31) /* Flexible Filter 31 Enable */ + +#define IGC_WUFC_EXT_FILTER_MASK GENMASK(31, 8) + +/* Physical Func Reset Done Indication */ +#define IGC_CTRL_EXT_LINK_MODE_MASK 0x00C00000 + /* Loop limit on how long we wait for auto-negotiation to complete */ #define COPPER_LINK_UP_LIMIT 10 #define PHY_AUTO_NEG_LIMIT 45 @@ -476,11 +518,50 @@ #define IGC_TXQCTL_QUEUE_MODE_LAUNCHT 0x00000001 #define IGC_TXQCTL_STRICT_CYCLE 0x00000002 #define IGC_TXQCTL_STRICT_END 0x00000004 +#define IGC_TXQCTL_QAV_SEL_MASK 0x000000C0 +#define IGC_TXQCTL_QAV_SEL_CBS0 0x00000080 +#define IGC_TXQCTL_QAV_SEL_CBS1 0x000000C0 + +#define IGC_TQAVCC_IDLESLOPE_MASK 0xFFFF +#define IGC_TQAVCC_KEEP_CREDITS BIT(30) + +#define IGC_MAX_SR_QUEUES 2 /* Receive Checksum Control */ #define IGC_RXCSUM_CRCOFL 0x00000800 /* CRC32 offload enable */ #define IGC_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ +/* PCIe PTM Control */ +#define IGC_PTM_CTRL_START_NOW BIT(29) /* Start PTM Now */ +#define IGC_PTM_CTRL_EN BIT(30) /* Enable PTM */ +#define IGC_PTM_CTRL_TRIG BIT(31) /* PTM Cycle trigger */ +#define IGC_PTM_CTRL_SHRT_CYC(usec) (((usec) & 0x2f) << 2) +#define IGC_PTM_CTRL_PTM_TO(usec) (((usec) & 0xff) << 8) + +#define IGC_PTM_SHORT_CYC_DEFAULT 10 /* Default Short/interrupted cycle interval */ +#define IGC_PTM_CYC_TIME_DEFAULT 5 /* Default PTM cycle time */ +#define IGC_PTM_TIMEOUT_DEFAULT 255 /* Default timeout for PTM errors */ + +/* PCIe Digital Delay */ +#define IGC_PCIE_DIG_DELAY_DEFAULT 0x01440000 + +/* PCIe PHY Delay */ +#define IGC_PCIE_PHY_DELAY_DEFAULT 0x40900000 + +#define IGC_TIMADJ_ADJUST_METH 0x40000000 + +/* PCIe PTM Status */ +#define IGC_PTM_STAT_VALID BIT(0) /* PTM Status */ +#define IGC_PTM_STAT_RET_ERR BIT(1) /* Root port timeout */ +#define IGC_PTM_STAT_BAD_PTM_RES BIT(2) /* PTM Response msg instead of PTM Response Data */ +#define IGC_PTM_STAT_T4M1_OVFL BIT(3) /* T4 minus T1 overflow */ +#define IGC_PTM_STAT_ADJUST_1ST BIT(4) /* 1588 timer adjusted during 1st PTM cycle */ +#define IGC_PTM_STAT_ADJUST_CYC BIT(5) /* 1588 timer adjusted during non-1st PTM cycle */ + +/* PCIe PTM Cycle Control */ +#define IGC_PTM_CYCLE_CTRL_CYC_TIME(msec) ((msec) & 0x3ff) /* PTM Cycle Time (msec) */ +#define IGC_PTM_CYCLE_CTRL_AUTO_CYC_EN BIT(31) /* PTM Cycle Control */ + /* GPY211 - I225 defines */ #define GPY_MMD_MASK 0xFFFF0000 #define GPY_MMD_SHIFT 16 diff --git a/drivers/net/ethernet/intel/igc/igc_ethtool.c b/drivers/net/ethernet/intel/igc/igc_ethtool.c index fa4171860623..e0a76ac1bbbc 100644 --- a/drivers/net/ethernet/intel/igc/igc_ethtool.c +++ b/drivers/net/ethernet/intel/igc/igc_ethtool.c @@ -862,7 +862,9 @@ static void igc_ethtool_get_stats(struct net_device *netdev, } static int igc_ethtool_get_coalesce(struct net_device 
*netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); @@ -882,7 +884,9 @@ static int igc_ethtool_get_coalesce(struct net_device *netdev, } static int igc_ethtool_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct igc_adapter *adapter = netdev_priv(netdev); int i; @@ -979,6 +983,12 @@ static int igc_ethtool_get_nfc_rule(struct igc_adapter *adapter, eth_broadcast_addr(fsp->m_u.ether_spec.h_source); } + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) { + fsp->flow_type |= FLOW_EXT; + memcpy(fsp->h_ext.data, rule->filter.user_data, sizeof(fsp->h_ext.data)); + memcpy(fsp->m_ext.data, rule->filter.user_mask, sizeof(fsp->m_ext.data)); + } + mutex_unlock(&adapter->nfc_rule_lock); return 0; @@ -1215,6 +1225,30 @@ static void igc_ethtool_init_nfc_rule(struct igc_nfc_rule *rule, ether_addr_copy(rule->filter.dst_addr, fsp->h_u.ether_spec.h_dest); } + + /* VLAN etype matching */ + if ((fsp->flow_type & FLOW_EXT) && fsp->h_ext.vlan_etype) { + rule->filter.vlan_etype = fsp->h_ext.vlan_etype; + rule->filter.match_flags |= IGC_FILTER_FLAG_VLAN_ETYPE; + } + + /* Check for user defined data */ + if ((fsp->flow_type & FLOW_EXT) && + (fsp->h_ext.data[0] || fsp->h_ext.data[1])) { + rule->filter.match_flags |= IGC_FILTER_FLAG_USER_DATA; + memcpy(rule->filter.user_data, fsp->h_ext.data, sizeof(fsp->h_ext.data)); + memcpy(rule->filter.user_mask, fsp->m_ext.data, sizeof(fsp->m_ext.data)); + } + + /* When multiple filter options or user data or vlan etype is set, use a + * flex filter. + */ + if ((rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) || + (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) || + (rule->filter.match_flags & (rule->filter.match_flags - 1))) + rule->flex = true; + else + rule->flex = false; } /** @@ -1244,11 +1278,6 @@ static int igc_ethtool_check_nfc_rule(struct igc_adapter *adapter, return -EINVAL; } - if (flags & (flags - 1)) { - netdev_dbg(dev, "Rule with multiple matches not supported\n"); - return -EOPNOTSUPP; - } - list_for_each_entry(tmp, &adapter->nfc_rule_list, list) { if (!memcmp(&rule->filter, &tmp->filter, sizeof(rule->filter)) && @@ -1280,12 +1309,6 @@ static int igc_ethtool_add_nfc_rule(struct igc_adapter *adapter, return -EOPNOTSUPP; } - if ((fsp->flow_type & FLOW_EXT) && - fsp->m_ext.vlan_tci != htons(VLAN_PRIO_MASK)) { - netdev_dbg(netdev, "VLAN mask not supported\n"); - return -EOPNOTSUPP; - } - if (fsp->ring_cookie >= adapter->num_rx_queues) { netdev_dbg(netdev, "Invalid action\n"); return -EINVAL; diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index e29aadbc6744..b877efae61df 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -12,6 +12,8 @@ #include <net/pkt_sched.h> #include <linux/bpf_trace.h> #include <net/xdp_sock_drv.h> +#include <linux/pci.h> + #include <net/ipv6.h> #include "igc.h" @@ -118,7 +120,7 @@ void igc_reset(struct igc_adapter *adapter) igc_ptp_reset(adapter); /* Re-enable TSN offloading, where applicable. 
*/ - igc_tsn_offload_apply(adapter); + igc_tsn_reset(adapter); igc_get_phy_info(hw); } @@ -149,6 +151,9 @@ static void igc_release_hw_control(struct igc_adapter *adapter) struct igc_hw *hw = &adapter->hw; u32 ctrl_ext; + if (!pci_device_is_present(adapter->pdev)) + return; + /* Let firmware take over control of h/w */ ctrl_ext = rd32(IGC_CTRL_EXT); wr32(IGC_CTRL_EXT, @@ -3075,11 +3080,320 @@ static void igc_del_etype_filter(struct igc_adapter *adapter, u16 etype) etype); } +static int igc_flex_filter_select(struct igc_adapter *adapter, + struct igc_flex_filter *input, + u32 *fhft) +{ + struct igc_hw *hw = &adapter->hw; + u8 fhft_index; + u32 fhftsl; + + if (input->index >= MAX_FLEX_FILTER) { + dev_err(&adapter->pdev->dev, "Wrong Flex Filter index selected!\n"); + return -EINVAL; + } + + /* Indirect table select register */ + fhftsl = rd32(IGC_FHFTSL); + fhftsl &= ~IGC_FHFTSL_FTSL_MASK; + switch (input->index) { + case 0 ... 7: + fhftsl |= 0x00; + break; + case 8 ... 15: + fhftsl |= 0x01; + break; + case 16 ... 23: + fhftsl |= 0x02; + break; + case 24 ... 31: + fhftsl |= 0x03; + break; + } + wr32(IGC_FHFTSL, fhftsl); + + /* Normalize index down to host table register */ + fhft_index = input->index % 8; + + *fhft = (fhft_index < 4) ? IGC_FHFT(fhft_index) : + IGC_FHFT_EXT(fhft_index - 4); + + return 0; +} + +static int igc_write_flex_filter_ll(struct igc_adapter *adapter, + struct igc_flex_filter *input) +{ + struct device *dev = &adapter->pdev->dev; + struct igc_hw *hw = &adapter->hw; + u8 *data = input->data; + u8 *mask = input->mask; + u32 queuing; + u32 fhft; + u32 wufc; + int ret; + int i; + + /* Length has to be aligned to 8. Otherwise the filter will fail. Bail + * out early to avoid surprises later. + */ + if (input->length % 8 != 0) { + dev_err(dev, "The length of a flex filter has to be 8 byte aligned!\n"); + return -EINVAL; + } + + /* Select corresponding flex filter register and get base for host table. */ + ret = igc_flex_filter_select(adapter, input, &fhft); + if (ret) + return ret; + + /* When adding a filter globally disable flex filter feature. That is + * recommended within the datasheet. + */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); + + /* Configure filter */ + queuing = input->length & IGC_FHFT_LENGTH_MASK; + queuing |= (input->rx_queue << IGC_FHFT_QUEUE_SHIFT) & IGC_FHFT_QUEUE_MASK; + queuing |= (input->prio << IGC_FHFT_PRIO_SHIFT) & IGC_FHFT_PRIO_MASK; + + if (input->immediate_irq) + queuing |= IGC_FHFT_IMM_INT; + + if (input->drop) + queuing |= IGC_FHFT_DROP; + + wr32(fhft + 0xFC, queuing); + + /* Write data (128 byte) and mask (128 bit) */ + for (i = 0; i < 16; ++i) { + const size_t data_idx = i * 8; + const size_t row_idx = i * 16; + u32 dw0 = + (data[data_idx + 0] << 0) | + (data[data_idx + 1] << 8) | + (data[data_idx + 2] << 16) | + (data[data_idx + 3] << 24); + u32 dw1 = + (data[data_idx + 4] << 0) | + (data[data_idx + 5] << 8) | + (data[data_idx + 6] << 16) | + (data[data_idx + 7] << 24); + u32 tmp; + + /* Write row: dw0, dw1 and mask */ + wr32(fhft + row_idx, dw0); + wr32(fhft + row_idx + 4, dw1); + + /* mask is only valid for MASK(7, 0) */ + tmp = rd32(fhft + row_idx + 8); + tmp &= ~GENMASK(7, 0); + tmp |= mask[i]; + wr32(fhft + row_idx + 8, tmp); + } + + /* Enable filter. */ + wufc |= IGC_WUFC_FLEX_HQ; + if (input->index > 8) { + /* Filter 0-7 are enabled via WUFC. The other 24 filters are not. 
*/ + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext |= (IGC_WUFC_EXT_FLX8 << (input->index - 8)); + + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc |= (IGC_WUFC_FLX0 << input->index); + } + wr32(IGC_WUFC, wufc); + + dev_dbg(&adapter->pdev->dev, "Added flex filter %u to HW.\n", + input->index); + + return 0; +} + +static void igc_flex_filter_add_field(struct igc_flex_filter *flex, + const void *src, unsigned int offset, + size_t len, const void *mask) +{ + int i; + + /* data */ + memcpy(&flex->data[offset], src, len); + + /* mask */ + for (i = 0; i < len; ++i) { + const unsigned int idx = i + offset; + const u8 *ptr = mask; + + if (mask) { + if (ptr[i] & 0xff) + flex->mask[idx / 8] |= BIT(idx % 8); + + continue; + } + + flex->mask[idx / 8] |= BIT(idx % 8); + } +} + +static int igc_find_avail_flex_filter_slot(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + int i; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + for (i = 0; i < MAX_FLEX_FILTER; i++) { + if (i < 8) { + if (!(wufc & (IGC_WUFC_FLX0 << i))) + return i; + } else { + if (!(wufc_ext & (IGC_WUFC_EXT_FLX8 << (i - 8)))) + return i; + } + } + + return -ENOSPC; +} + +static bool igc_flex_filter_in_use(struct igc_adapter *adapter) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc, wufc_ext; + + wufc = rd32(IGC_WUFC); + wufc_ext = rd32(IGC_WUFC_EXT); + + if (wufc & IGC_WUFC_FILTER_MASK) + return true; + + if (wufc_ext & IGC_WUFC_EXT_FILTER_MASK) + return true; + + return false; +} + +static int igc_add_flex_filter(struct igc_adapter *adapter, + struct igc_nfc_rule *rule) +{ + struct igc_flex_filter flex = { }; + struct igc_nfc_filter *filter = &rule->filter; + unsigned int eth_offset, user_offset; + int ret, index; + bool vlan; + + index = igc_find_avail_flex_filter_slot(adapter); + if (index < 0) + return -ENOSPC; + + /* Construct the flex filter: + * -> dest_mac [6] + * -> src_mac [6] + * -> tpid [2] + * -> vlan tci [2] + * -> ether type [2] + * -> user data [8] + * -> = 26 bytes => 32 length + */ + flex.index = index; + flex.length = 32; + flex.rx_queue = rule->action; + + vlan = rule->filter.vlan_tci || rule->filter.vlan_etype; + eth_offset = vlan ? 16 : 12; + user_offset = vlan ? 18 : 14; + + /* Add destination MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_DST_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->dst_addr, 0, + ETH_ALEN, NULL); + + /* Add source MAC */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_SRC_MAC_ADDR) + igc_flex_filter_add_field(&flex, &filter->src_addr, 6, + ETH_ALEN, NULL); + + /* Add VLAN etype */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_ETYPE) + igc_flex_filter_add_field(&flex, &filter->vlan_etype, 12, + sizeof(filter->vlan_etype), + NULL); + + /* Add VLAN TCI */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_VLAN_TCI) + igc_flex_filter_add_field(&flex, &filter->vlan_tci, 14, + sizeof(filter->vlan_tci), NULL); + + /* Add Ether type */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { + __be16 etype = cpu_to_be16(filter->etype); + + igc_flex_filter_add_field(&flex, &etype, eth_offset, + sizeof(etype), NULL); + } + + /* Add user data */ + if (rule->filter.match_flags & IGC_FILTER_FLAG_USER_DATA) + igc_flex_filter_add_field(&flex, &filter->user_data, + user_offset, + sizeof(filter->user_data), + filter->user_mask); + + /* Add it down to the hardware and enable it. 
*/ + ret = igc_write_flex_filter_ll(adapter, &flex); + if (ret) + return ret; + + filter->flex_index = index; + + return 0; +} + +static void igc_del_flex_filter(struct igc_adapter *adapter, + u16 reg_index) +{ + struct igc_hw *hw = &adapter->hw; + u32 wufc; + + /* Just disable the filter. The filter table itself is kept + * intact. Another flex_filter_add() should override the "old" data + * then. + */ + if (reg_index > 8) { + u32 wufc_ext = rd32(IGC_WUFC_EXT); + + wufc_ext &= ~(IGC_WUFC_EXT_FLX8 << (reg_index - 8)); + wr32(IGC_WUFC_EXT, wufc_ext); + } else { + wufc = rd32(IGC_WUFC); + + wufc &= ~(IGC_WUFC_FLX0 << reg_index); + wr32(IGC_WUFC, wufc); + } + + if (igc_flex_filter_in_use(adapter)) + return; + + /* No filters are in use, we may disable flex filters */ + wufc = rd32(IGC_WUFC); + wufc &= ~IGC_WUFC_FLEX_HQ; + wr32(IGC_WUFC, wufc); +} + static int igc_enable_nfc_rule(struct igc_adapter *adapter, - const struct igc_nfc_rule *rule) + struct igc_nfc_rule *rule) { int err; + if (rule->flex) { + return igc_add_flex_filter(adapter, rule); + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) { err = igc_add_etype_filter(adapter, rule->filter.etype, rule->action); @@ -3116,6 +3430,11 @@ static int igc_enable_nfc_rule(struct igc_adapter *adapter, static void igc_disable_nfc_rule(struct igc_adapter *adapter, const struct igc_nfc_rule *rule) { + if (rule->flex) { + igc_del_flex_filter(adapter, rule->filter.flex_index); + return; + } + if (rule->filter.match_flags & IGC_FILTER_FLAG_ETHER_TYPE) igc_del_etype_filter(adapter, rule->filter.etype); @@ -4449,26 +4768,29 @@ void igc_down(struct igc_adapter *adapter) igc_ptp_suspend(adapter); - /* disable receives in the hardware */ - rctl = rd32(IGC_RCTL); - wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); - /* flush and sleep below */ - + if (pci_device_is_present(adapter->pdev)) { + /* disable receives in the hardware */ + rctl = rd32(IGC_RCTL); + wr32(IGC_RCTL, rctl & ~IGC_RCTL_EN); + /* flush and sleep below */ + } /* set trans_start so we don't get spurious watchdogs during reset */ netif_trans_update(netdev); netif_carrier_off(netdev); netif_tx_stop_all_queues(netdev); - /* disable transmits in the hardware */ - tctl = rd32(IGC_TCTL); - tctl &= ~IGC_TCTL_EN; - wr32(IGC_TCTL, tctl); - /* flush both disables and wait for them to finish */ - wrfl(); - usleep_range(10000, 20000); + if (pci_device_is_present(adapter->pdev)) { + /* disable transmits in the hardware */ + tctl = rd32(IGC_TCTL); + tctl &= ~IGC_TCTL_EN; + wr32(IGC_TCTL, tctl); + /* flush both disables and wait for them to finish */ + wrfl(); + usleep_range(10000, 20000); - igc_irq_disable(adapter); + igc_irq_disable(adapter); + } adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; @@ -4811,6 +5133,7 @@ static irqreturn_t igc_msix_ring(int irq, void *data) */ static int igc_request_msix(struct igc_adapter *adapter) { + unsigned int num_q_vectors = adapter->num_q_vectors; int i = 0, err = 0, vector = 0, free_vector = 0; struct net_device *netdev = adapter->netdev; @@ -4819,7 +5142,13 @@ static int igc_request_msix(struct igc_adapter *adapter) if (err) goto err_out; - for (i = 0; i < adapter->num_q_vectors; i++) { + if (num_q_vectors > MAX_Q_VECTORS) { + num_q_vectors = MAX_Q_VECTORS; + dev_warn(&adapter->pdev->dev, + "The number of queue vectors (%d) is higher than max allowed (%d)\n", + adapter->num_q_vectors, MAX_Q_VECTORS); + } + for (i = 0; i < num_q_vectors; i++) { struct igc_q_vector *q_vector = adapter->q_vector[i]; vector++; @@ -4898,20 +5227,12 @@ bool igc_has_link(struct igc_adapter 
*adapter) * false until the igc_check_for_link establishes link * for copper adapters ONLY */ - switch (hw->phy.media_type) { - case igc_media_type_copper: - if (!hw->mac.get_link_status) - return true; - hw->mac.ops.check_for_link(hw); - link_active = !hw->mac.get_link_status; - break; - default: - case igc_media_type_unknown: - break; - } + if (!hw->mac.get_link_status) + return true; + hw->mac.ops.check_for_link(hw); + link_active = !hw->mac.get_link_status; - if (hw->mac.type == igc_i225 && - hw->phy.id == I225_I_PHY_ID) { + if (hw->mac.type == igc_i225) { if (!netif_carrier_ok(adapter->netdev)) { adapter->flags &= ~IGC_FLAG_NEED_LINK_UPDATE; } else if (!(adapter->flags & IGC_FLAG_NEED_LINK_UPDATE)) { @@ -4999,7 +5320,9 @@ static void igc_watchdog_task(struct work_struct *work) adapter->tx_timeout_factor = 14; break; case SPEED_100: - /* maybe add some timeout factor ? */ + case SPEED_1000: + case SPEED_2500: + adapter->tx_timeout_factor = 7; break; } @@ -5426,7 +5749,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, bool enable) { struct igc_ring *ring; - int i; if (queue < 0 || queue >= adapter->num_tx_queues) return -EINVAL; @@ -5434,17 +5756,6 @@ static int igc_save_launchtime_params(struct igc_adapter *adapter, int queue, ring = adapter->tx_ring[queue]; ring->launchtime_enable = enable; - if (adapter->base_time) - return 0; - - adapter->cycle_time = NSEC_PER_SEC; - - for (i = 0; i < adapter->num_tx_queues; i++) { - ring = adapter->tx_ring[i]; - ring->start_time = 0; - ring->end_time = NSEC_PER_SEC; - } - return 0; } @@ -5489,7 +5800,7 @@ static bool validate_schedule(struct igc_adapter *adapter, if (e->command != TC_TAPRIO_CMD_SET_GATES) return false; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { if (e->gate_mask & BIT(i)) queue_uses[i]++; @@ -5517,16 +5828,31 @@ static int igc_tsn_enable_launchtime(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } +static int igc_tsn_clear_schedule(struct igc_adapter *adapter) +{ + int i; + + adapter->base_time = 0; + adapter->cycle_time = NSEC_PER_SEC; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + ring->start_time = 0; + ring->end_time = NSEC_PER_SEC; + } + + return 0; +} + static int igc_save_qbv_schedule(struct igc_adapter *adapter, struct tc_taprio_qopt_offload *qopt) { u32 start_time = 0, end_time = 0; size_t n; - if (!qopt->enable) { - adapter->base_time = 0; - return 0; - } + if (!qopt->enable) + return igc_tsn_clear_schedule(adapter); if (adapter->base_time) return -EALREADY; @@ -5546,7 +5872,7 @@ static int igc_save_qbv_schedule(struct igc_adapter *adapter, end_time += e->interval; - for (i = 0; i < IGC_MAX_TX_QUEUES; i++) { + for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; if (!(e->gate_mask & BIT(i))) @@ -5578,6 +5904,74 @@ static int igc_tsn_enable_qbv_scheduling(struct igc_adapter *adapter, return igc_tsn_offload_apply(adapter); } +static int igc_save_cbs_params(struct igc_adapter *adapter, int queue, + bool enable, int idleslope, int sendslope, + int hicredit, int locredit) +{ + bool cbs_status[IGC_MAX_SR_QUEUES] = { false }; + struct net_device *netdev = adapter->netdev; + struct igc_ring *ring; + int i; + + /* i225 has two sets of credit-based shaper logic. 
+ * Supporting it only on the top two priority queues + */ + if (queue < 0 || queue > 1) + return -EINVAL; + + ring = adapter->tx_ring[queue]; + + for (i = 0; i < IGC_MAX_SR_QUEUES; i++) + if (adapter->tx_ring[i]) + cbs_status[i] = adapter->tx_ring[i]->cbs_enable; + + /* CBS should be enabled on the highest priority queue first in order + * for the CBS algorithm to operate as intended. + */ + if (enable) { + if (queue == 1 && !cbs_status[0]) { + netdev_err(netdev, + "Enabling CBS on queue1 before queue0\n"); + return -EINVAL; + } + } else { + if (queue == 0 && cbs_status[1]) { + netdev_err(netdev, + "Disabling CBS on queue0 before queue1\n"); + return -EINVAL; + } + } + + ring->cbs_enable = enable; + ring->idleslope = idleslope; + ring->sendslope = sendslope; + ring->hicredit = hicredit; + ring->locredit = locredit; + + return 0; +} + +static int igc_tsn_enable_cbs(struct igc_adapter *adapter, + struct tc_cbs_qopt_offload *qopt) +{ + struct igc_hw *hw = &adapter->hw; + int err; + + if (hw->mac.type != igc_i225) + return -EOPNOTSUPP; + + if (qopt->queue < 0 || qopt->queue > 1) + return -EINVAL; + + err = igc_save_cbs_params(adapter, qopt->queue, qopt->enable, + qopt->idleslope, qopt->sendslope, + qopt->hicredit, qopt->locredit); + if (err) + return err; + + return igc_tsn_offload_apply(adapter); +} + static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, void *type_data) { @@ -5590,6 +5984,9 @@ static int igc_setup_tc(struct net_device *dev, enum tc_setup_type type, case TC_SETUP_QDISC_ETF: return igc_tsn_enable_launchtime(adapter, type_data); + case TC_SETUP_QDISC_CBS: + return igc_tsn_enable_cbs(adapter, type_data); + default: return -EOPNOTSUPP; } @@ -5698,7 +6095,7 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_fix_features = igc_fix_features, .ndo_set_features = igc_set_features, .ndo_features_check = igc_features_check, - .ndo_do_ioctl = igc_ioctl, + .ndo_eth_ioctl = igc_ioctl, .ndo_setup_tc = igc_setup_tc, .ndo_bpf = igc_bpf, .ndo_xdp_xmit = igc_xdp_xmit, @@ -5859,6 +6256,10 @@ static int igc_probe(struct pci_dev *pdev, pci_enable_pcie_error_reporting(pdev); + err = pci_enable_ptm(pdev, NULL); + if (err < 0) + dev_info(&pdev->dev, "PCIe PTM not supported by PCIe bus/controller\n"); + pci_set_master(pdev); err = -ENOMEM; @@ -6012,6 +6413,8 @@ static int igc_probe(struct pci_dev *pdev, igc_ptp_init(adapter); + igc_tsn_clear_schedule(adapter); + /* reset the hardware with the new settings */ igc_reset(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc_phy.c b/drivers/net/ethernet/intel/igc/igc_phy.c index 83aeb5e7076f..5cad31c3c7b0 100644 --- a/drivers/net/ethernet/intel/igc/igc_phy.c +++ b/drivers/net/ethernet/intel/igc/igc_phy.c @@ -249,8 +249,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) return ret_val; } - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) { + if (phy->autoneg_mask & ADVERTISE_2500_FULL) { /* Read the MULTI GBT AN Control Register - reg 7.32 */ ret_val = phy->ops.read_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | @@ -390,8 +389,7 @@ static s32 igc_phy_setup_autoneg(struct igc_hw *hw) ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); - if ((phy->autoneg_mask & ADVERTISE_2500_FULL) && - hw->phy.id == I225_I_PHY_ID) + if (phy->autoneg_mask & ADVERTISE_2500_FULL) ret_val = phy->ops.write_reg(hw, (STANDARD_AN_REG_MASK << MMD_DEVADDR_SHIFT) | diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 
69617d2c1be2..0f021909b430 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -9,6 +9,8 @@ #include <linux/ptp_classify.h> #include <linux/clocksource.h> #include <linux/ktime.h> +#include <linux/delay.h> +#include <linux/iopoll.h> #define INCVALUE_MASK 0x7fffffff #define ISGN 0x80000000 @@ -16,6 +18,9 @@ #define IGC_SYSTIM_OVERFLOW_PERIOD (HZ * 60 * 9) #define IGC_PTP_TX_TIMEOUT (HZ * 15) +#define IGC_PTM_STAT_SLEEP 2 +#define IGC_PTM_STAT_TIMEOUT 100 + /* SYSTIM read access for I225 */ void igc_ptp_read(struct igc_adapter *adapter, struct timespec64 *ts) { @@ -752,6 +757,147 @@ int igc_ptp_get_ts_config(struct net_device *netdev, struct ifreq *ifr) -EFAULT : 0; } +/* The two conditions below must be met for cross timestamping via + * PCIe PTM: + * + * 1. We have an way to convert the timestamps in the PTM messages + * to something related to the system clocks (right now, only + * X86 systems with support for the Always Running Timer allow that); + * + * 2. We have PTM enabled in the path from the device to the PCIe root port. + */ +static bool igc_is_crosststamp_supported(struct igc_adapter *adapter) +{ + return IS_ENABLED(CONFIG_X86_TSC) ? pcie_ptm_enabled(adapter->pdev) : false; +} + +static struct system_counterval_t igc_device_tstamp_to_system(u64 tstamp) +{ +#if IS_ENABLED(CONFIG_X86_TSC) + return convert_art_ns_to_tsc(tstamp); +#else + return (struct system_counterval_t) { }; +#endif +} + +static void igc_ptm_log_error(struct igc_adapter *adapter, u32 ptm_stat) +{ + struct net_device *netdev = adapter->netdev; + + switch (ptm_stat) { + case IGC_PTM_STAT_RET_ERR: + netdev_err(netdev, "PTM Error: Root port timeout\n"); + break; + case IGC_PTM_STAT_BAD_PTM_RES: + netdev_err(netdev, "PTM Error: Bad response, PTM Response Data expected\n"); + break; + case IGC_PTM_STAT_T4M1_OVFL: + netdev_err(netdev, "PTM Error: T4 minus T1 overflow\n"); + break; + case IGC_PTM_STAT_ADJUST_1ST: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during first PTM cycle\n"); + break; + case IGC_PTM_STAT_ADJUST_CYC: + netdev_err(netdev, "PTM Error: 1588 timer adjusted during non-first PTM cycle\n"); + break; + default: + netdev_err(netdev, "PTM Error: Unknown error (%#x)\n", ptm_stat); + break; + } +} + +static int igc_phc_get_syncdevicetime(ktime_t *device, + struct system_counterval_t *system, + void *ctx) +{ + u32 stat, t2_curr_h, t2_curr_l, ctrl; + struct igc_adapter *adapter = ctx; + struct igc_hw *hw = &adapter->hw; + int err, count = 100; + ktime_t t1, t2_curr; + + /* Get a snapshot of system clocks to use as historic value. */ + ktime_get_snapshot(&adapter->snapshot); + + do { + /* Doing this in a loop because in the event of a + * badly timed (ha!) system clock adjustment, we may + * get PTM errors from the PCI root, but these errors + * are transitory. Repeating the process returns valid + * data eventually. + */ + + /* To "manually" start the PTM cycle we need to clear and + * then set again the TRIG bit. + */ + ctrl = rd32(IGC_PTM_CTRL); + ctrl &= ~IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + ctrl |= IGC_PTM_CTRL_TRIG; + wr32(IGC_PTM_CTRL, ctrl); + + /* The cycle only starts "for real" when software notifies + * that it has read the registers, this is done by setting + * VALID bit. 
+ */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + + err = readx_poll_timeout(rd32, IGC_PTM_STAT, stat, + stat, IGC_PTM_STAT_SLEEP, + IGC_PTM_STAT_TIMEOUT); + if (err < 0) { + netdev_err(adapter->netdev, "Timeout reading IGC_PTM_STAT register\n"); + return err; + } + + if ((stat & IGC_PTM_STAT_VALID) == IGC_PTM_STAT_VALID) + break; + + if (stat & ~IGC_PTM_STAT_VALID) { + /* An error occurred, log it. */ + igc_ptm_log_error(adapter, stat); + /* The STAT register is write-1-to-clear (W1C), + * so write the previous error status to clear it. + */ + wr32(IGC_PTM_STAT, stat); + continue; + } + } while (--count); + + if (!count) { + netdev_err(adapter->netdev, "Exceeded number of tries for PTM cycle\n"); + return -ETIMEDOUT; + } + + t1 = ktime_set(rd32(IGC_PTM_T1_TIM0_H), rd32(IGC_PTM_T1_TIM0_L)); + + t2_curr_l = rd32(IGC_PTM_CURR_T2_L); + t2_curr_h = rd32(IGC_PTM_CURR_T2_H); + + /* FIXME: When the register that tells the endianness of the + * PTM registers are implemented, check them here and add the + * appropriate conversion. + */ + t2_curr_h = swab32(t2_curr_h); + + t2_curr = ((s64)t2_curr_h << 32 | t2_curr_l); + + *device = t1; + *system = igc_device_tstamp_to_system(t2_curr); + + return 0; +} + +static int igc_ptp_getcrosststamp(struct ptp_clock_info *ptp, + struct system_device_crosststamp *cts) +{ + struct igc_adapter *adapter = container_of(ptp, struct igc_adapter, + ptp_caps); + + return get_device_system_crosststamp(igc_phc_get_syncdevicetime, + adapter, &adapter->snapshot, cts); +} + /** * igc_ptp_init - Initialize PTP functionality * @adapter: Board private structure @@ -788,6 +934,11 @@ void igc_ptp_init(struct igc_adapter *adapter) adapter->ptp_caps.n_per_out = IGC_N_PEROUT; adapter->ptp_caps.n_pins = IGC_N_SDP; adapter->ptp_caps.verify = igc_ptp_verify_pin; + + if (!igc_is_crosststamp_supported(adapter)) + break; + + adapter->ptp_caps.getcrosststamp = igc_ptp_getcrosststamp; break; default: adapter->ptp_clock = NULL; @@ -849,7 +1000,8 @@ void igc_ptp_suspend(struct igc_adapter *adapter) adapter->ptp_tx_skb = NULL; clear_bit_unlock(__IGC_PTP_TX_IN_PROGRESS, &adapter->state); - igc_ptp_time_save(adapter); + if (pci_device_is_present(adapter->pdev)) + igc_ptp_time_save(adapter); } /** @@ -878,7 +1030,9 @@ void igc_ptp_stop(struct igc_adapter *adapter) void igc_ptp_reset(struct igc_adapter *adapter) { struct igc_hw *hw = &adapter->hw; + u32 cycle_ctrl, ctrl; unsigned long flags; + u32 timadj; /* reset the tstamp_config */ igc_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); @@ -887,12 +1041,38 @@ void igc_ptp_reset(struct igc_adapter *adapter) switch (adapter->hw.mac.type) { case igc_i225: + timadj = rd32(IGC_TIMADJ); + timadj |= IGC_TIMADJ_ADJUST_METH; + wr32(IGC_TIMADJ, timadj); + wr32(IGC_TSAUXC, 0x0); wr32(IGC_TSSDP, 0x0); wr32(IGC_TSIM, IGC_TSICR_INTERRUPTS | (adapter->pps_sys_wrap_on ? IGC_TSICR_SYS_WRAP : 0)); wr32(IGC_IMS, IGC_IMS_TS); + + if (!igc_is_crosststamp_supported(adapter)) + break; + + wr32(IGC_PCIE_DIG_DELAY, IGC_PCIE_DIG_DELAY_DEFAULT); + wr32(IGC_PCIE_PHY_DELAY, IGC_PCIE_PHY_DELAY_DEFAULT); + + cycle_ctrl = IGC_PTM_CYCLE_CTRL_CYC_TIME(IGC_PTM_CYC_TIME_DEFAULT); + + wr32(IGC_PTM_CYCLE_CTRL, cycle_ctrl); + + ctrl = IGC_PTM_CTRL_EN | + IGC_PTM_CTRL_START_NOW | + IGC_PTM_CTRL_SHRT_CYC(IGC_PTM_SHORT_CYC_DEFAULT) | + IGC_PTM_CTRL_PTM_TO(IGC_PTM_TIMEOUT_DEFAULT) | + IGC_PTM_CTRL_TRIG; + + wr32(IGC_PTM_CTRL, ctrl); + + /* Force the first cycle to run. */ + wr32(IGC_PTM_STAT, IGC_PTM_STAT_VALID); + break; default: /* No work to do. 
*/ diff --git a/drivers/net/ethernet/intel/igc/igc_regs.h b/drivers/net/ethernet/intel/igc/igc_regs.h index 0f82990567d9..e197a33d93a0 100644 --- a/drivers/net/ethernet/intel/igc/igc_regs.h +++ b/drivers/net/ethernet/intel/igc/igc_regs.h @@ -67,6 +67,9 @@ /* Filtering Registers */ #define IGC_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ +#define IGC_FHFT(_n) (0x09000 + (256 * (_n))) /* Flexible Host Filter */ +#define IGC_FHFT_EXT(_n) (0x09A00 + (256 * (_n))) /* Flexible Host Filter Extended */ +#define IGC_FHFTSL 0x05804 /* Flex Filter indirect table select */ /* ETQF register bit definitions */ #define IGC_ETQF_FILTER_ENABLE BIT(26) @@ -75,6 +78,19 @@ #define IGC_ETQF_QUEUE_MASK 0x00070000 #define IGC_ETQF_ETYPE_MASK 0x0000FFFF +/* FHFT register bit definitions */ +#define IGC_FHFT_LENGTH_MASK GENMASK(7, 0) +#define IGC_FHFT_QUEUE_SHIFT 8 +#define IGC_FHFT_QUEUE_MASK GENMASK(10, 8) +#define IGC_FHFT_PRIO_SHIFT 16 +#define IGC_FHFT_PRIO_MASK GENMASK(18, 16) +#define IGC_FHFT_IMM_INT BIT(24) +#define IGC_FHFT_DROP BIT(25) + +/* FHFTSL register bit definitions */ +#define IGC_FHFTSL_FTSL_SHIFT 0 +#define IGC_FHFTSL_FTSL_MASK GENMASK(1, 0) + /* Redirection Table - RW Array */ #define IGC_RETA(_i) (0x05C00 + ((_i) * 4)) /* RSS Random Key - RW Array */ @@ -220,6 +236,9 @@ #define IGC_ENDQT(_n) (0x3334 + 0x4 * (_n)) #define IGC_DTXMXPKTSZ 0x355C +#define IGC_TQAVCC(_n) (0x3004 + ((_n) * 0x40)) +#define IGC_TQAVHC(_n) (0x300C + ((_n) * 0x40)) + /* System Time Registers */ #define IGC_SYSTIML 0x0B600 /* System time register Low - RO */ #define IGC_SYSTIMH 0x0B604 /* System time register High - RO */ @@ -229,6 +248,29 @@ #define IGC_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */ #define IGC_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */ +#define IGC_TIMADJ 0x0B60C /* Time Adjustment Offset Register */ + +/* PCIe Registers */ +#define IGC_PTM_CTRL 0x12540 /* PTM Control */ +#define IGC_PTM_STAT 0x12544 /* PTM Status */ +#define IGC_PTM_CYCLE_CTRL 0x1254C /* PTM Cycle Control */ + +/* PTM Time registers */ +#define IGC_PTM_T1_TIM0_L 0x12558 /* T1 on Timer 0 Low */ +#define IGC_PTM_T1_TIM0_H 0x1255C /* T1 on Timer 0 High */ + +#define IGC_PTM_CURR_T2_L 0x1258C /* Current T2 Low */ +#define IGC_PTM_CURR_T2_H 0x12590 /* Current T2 High */ +#define IGC_PTM_PREV_T2_L 0x12584 /* Previous T2 Low */ +#define IGC_PTM_PREV_T2_H 0x12588 /* Previous T2 High */ +#define IGC_PTM_PREV_T4M1 0x12578 /* T4 Minus T1 on previous PTM Cycle */ +#define IGC_PTM_CURR_T4M1 0x1257C /* T4 Minus T1 on this PTM Cycle */ +#define IGC_PTM_PREV_T3M2 0x12580 /* T3 Minus T2 on previous PTM Cycle */ +#define IGC_PTM_TDELAY 0x12594 /* PTM PCIe Link Delay */ + +#define IGC_PCIE_DIG_DELAY 0x12550 /* PCIe Digital Delay */ +#define IGC_PCIE_PHY_DELAY 0x12554 /* PCIe PHY Delay */ + /* Management registers */ #define IGC_MANC 0x05820 /* Management Control - RW */ @@ -240,6 +282,7 @@ #define IGC_WUFC 0x05808 /* Wakeup Filter Control - RW */ #define IGC_WUS 0x05810 /* Wakeup Status - R/W1C */ #define IGC_WUPL 0x05900 /* Wakeup Packet Length - RW */ +#define IGC_WUFC_EXT 0x0580C /* Wakeup Filter Control Register Extended - RW */ /* Wake Up packet memory */ #define IGC_WUPM_REG(_i) (0x05A00 + ((_i) * 4)) diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.c b/drivers/net/ethernet/intel/igc/igc_tsn.c index 174103c4bea6..0fce22de2ab8 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.c +++ b/drivers/net/ethernet/intel/igc/igc_tsn.c @@ -18,8 +18,38 @@ static bool is_any_launchtime(struct igc_adapter *adapter) return false; } 
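The getcrosststamp callback and PTM cycle setup added in igc_ptp.c above are what back the PTP_SYS_OFFSET_PRECISE ioctl on the adapter's PTP clock device. A minimal userspace sketch of querying such a cross-timestamp might look like the following; it assumes the i225 exposes /dev/ptp0 (hypothetical node name) and sits on a PTM-enabled PCIe path, with error handling trimmed:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset_precise off = { 0 };
	int fd = open("/dev/ptp0", O_RDWR);	/* hypothetical PHC node for the i225 */

	if (fd < 0 || ioctl(fd, PTP_SYS_OFFSET_PRECISE, &off)) {
		perror("PTP_SYS_OFFSET_PRECISE");
		return 1;
	}

	/* device time and system realtime captured at the same PTM instant */
	printf("device:   %lld.%09u\n", (long long)off.device.sec, off.device.nsec);
	printf("realtime: %lld.%09u\n", (long long)off.sys_realtime.sec, off.sys_realtime.nsec);

	close(fd);
	return 0;
}

Tools such as linuxptp's phc2sys prefer this ioctl when the driver advertises it, which is what the ptp_caps.getcrosststamp assignment above enables.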
+static bool is_cbs_enabled(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *ring = adapter->tx_ring[i]; + + if (ring->cbs_enable) + return true; + } + + return false; +} + +static unsigned int igc_tsn_new_flags(struct igc_adapter *adapter) +{ + unsigned int new_flags = adapter->flags & ~IGC_FLAG_TSN_ANY_ENABLED; + + if (adapter->base_time) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_any_launchtime(adapter)) + new_flags |= IGC_FLAG_TSN_QBV_ENABLED; + + if (is_cbs_enabled(adapter)) + new_flags |= IGC_FLAG_TSN_QAV_ENABLED; + + return new_flags; +} + /* Returns the TSN specific registers to their default values after - * TSN offloading is disabled. + * the adapter is reset. */ static int igc_tsn_disable_offload(struct igc_adapter *adapter) { @@ -27,11 +57,6 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) u32 tqavctrl; int i; - if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED)) - return 0; - - adapter->cycle_time = 0; - wr32(IGC_TXPBS, I225_TXPBSIZE_DEFAULT); wr32(IGC_DTXMXPKTSZ, IGC_DTXMXPKTSZ_DEFAULT); @@ -41,18 +66,12 @@ static int igc_tsn_disable_offload(struct igc_adapter *adapter) wr32(IGC_TQAVCTRL, tqavctrl); for (i = 0; i < adapter->num_tx_queues; i++) { - struct igc_ring *ring = adapter->tx_ring[i]; - - ring->start_time = 0; - ring->end_time = 0; - ring->launchtime_enable = false; - wr32(IGC_TXQCTL(i), 0); wr32(IGC_STQT(i), 0); wr32(IGC_ENDQT(i), NSEC_PER_SEC); } - wr32(IGC_QBVCYCLET_S, NSEC_PER_SEC); + wr32(IGC_QBVCYCLET_S, 0); wr32(IGC_QBVCYCLET, NSEC_PER_SEC); adapter->flags &= ~IGC_FLAG_TSN_QBV_ENABLED; @@ -68,9 +87,6 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) ktime_t base_time, systim; int i; - if (adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) - return 0; - cycle = adapter->cycle_time; base_time = adapter->base_time; @@ -88,6 +104,8 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) for (i = 0; i < adapter->num_tx_queues; i++) { struct igc_ring *ring = adapter->tx_ring[i]; u32 txqctl = 0; + u16 cbs_value; + u32 tqavcc; wr32(IGC_STQT(i), ring->start_time); wr32(IGC_ENDQT(i), ring->end_time); @@ -105,6 +123,90 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) if (ring->launchtime_enable) txqctl |= IGC_TXQCTL_QUEUE_MODE_LAUNCHT; + /* Skip configuring CBS for Q2 and Q3 */ + if (i > 1) + goto skip_cbs; + + if (ring->cbs_enable) { + if (i == 0) + txqctl |= IGC_TXQCTL_QAV_SEL_CBS0; + else + txqctl |= IGC_TXQCTL_QAV_SEL_CBS1; + + /* According to i225 datasheet section 7.5.2.7, we + * should set the 'idleSlope' field from TQAVCC + * register following the equation: + * + * value = link-speed 0x7736 * BW * 0.2 + * ---------- * ----------------- (E1) + * 100Mbps 2.5 + * + * Note that 'link-speed' is in Mbps. + * + * 'BW' is the percentage bandwidth out of full + * link speed which can be found with the + * following equation. Note that idleSlope here + * is the parameter from this function + * which is in kbps. + * + * BW = idleSlope + * ----------------- (E2) + * link-speed * 1000 + * + * That said, we can come up with a generic + * equation to calculate the value we should set + * it TQAVCC register by replacing 'BW' in E1 by E2. + * The resulting equation is: + * + * value = link-speed * 0x7736 * idleSlope * 0.2 + * ------------------------------------- (E3) + * 100 * 2.5 * link-speed * 1000 + * + * 'link-speed' is present in both sides of the + * fraction so it is canceled out. 
The final + * equation is the following: + * + * value = idleSlope * 61036 + * ----------------- (E4) + * 2500000 + * + * NOTE: For i225, given the above, we can see + * that idleslope is represented in + * 40.959433 kbps units by the value at + * the TQAVCC register (2.5Gbps / 61036), + * which reduces the granularity for + * idleslope increments. + * + * In i225 controller, the sendSlope and loCredit + * parameters from CBS are not configurable + * by software so we don't do any + * 'controller configuration' in respect to + * these parameters. + */ + cbs_value = DIV_ROUND_UP_ULL(ring->idleslope + * 61036ULL, 2500000); + + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~IGC_TQAVCC_IDLESLOPE_MASK; + tqavcc |= cbs_value | IGC_TQAVCC_KEEP_CREDITS; + wr32(IGC_TQAVCC(i), tqavcc); + + wr32(IGC_TQAVHC(i), + 0x80000000 + ring->hicredit * 0x7735); + } else { + /* Disable any CBS for the queue */ + txqctl &= ~(IGC_TXQCTL_QAV_SEL_MASK); + + /* Set idleSlope to zero. */ + tqavcc = rd32(IGC_TQAVCC(i)); + tqavcc &= ~(IGC_TQAVCC_IDLESLOPE_MASK | + IGC_TQAVCC_KEEP_CREDITS); + wr32(IGC_TQAVCC(i), tqavcc); + + /* Set hiCredit to zero. */ + wr32(IGC_TQAVHC(i), 0); + } +skip_cbs: wr32(IGC_TXQCTL(i), txqctl); } @@ -125,33 +227,41 @@ static int igc_tsn_enable_offload(struct igc_adapter *adapter) wr32(IGC_BASET_H, baset_h); wr32(IGC_BASET_L, baset_l); - adapter->flags |= IGC_FLAG_TSN_QBV_ENABLED; - return 0; } -int igc_tsn_offload_apply(struct igc_adapter *adapter) +int igc_tsn_reset(struct igc_adapter *adapter) { - bool is_any_enabled = adapter->base_time || is_any_launchtime(adapter); + unsigned int new_flags; + int err = 0; - if (!(adapter->flags & IGC_FLAG_TSN_QBV_ENABLED) && !is_any_enabled) - return 0; + new_flags = igc_tsn_new_flags(adapter); - if (!is_any_enabled) { - int err = igc_tsn_disable_offload(adapter); + if (!(new_flags & IGC_FLAG_TSN_ANY_ENABLED)) + return igc_tsn_disable_offload(adapter); - if (err < 0) - return err; + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; - /* The BASET registers aren't cleared when writing - * into them, force a reset if the interface is - * running. 
- */ - if (netif_running(adapter->netdev)) - schedule_work(&adapter->reset_task); + adapter->flags = new_flags; + return err; +} + +int igc_tsn_offload_apply(struct igc_adapter *adapter) +{ + int err; + + if (netif_running(adapter->netdev)) { + schedule_work(&adapter->reset_task); return 0; } - return igc_tsn_enable_offload(adapter); + err = igc_tsn_enable_offload(adapter); + if (err < 0) + return err; + + adapter->flags = igc_tsn_new_flags(adapter); + return 0; } diff --git a/drivers/net/ethernet/intel/igc/igc_tsn.h b/drivers/net/ethernet/intel/igc/igc_tsn.h index f76bc86ddccd..1512307f5a52 100644 --- a/drivers/net/ethernet/intel/igc/igc_tsn.h +++ b/drivers/net/ethernet/intel/igc/igc_tsn.h @@ -5,5 +5,6 @@ #define _IGC_TSN_H_ int igc_tsn_offload_apply(struct igc_adapter *adapter); +int igc_tsn_reset(struct igc_adapter *adapter); #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 4ceaca0f6ce3..fc26e4ddeb0d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2358,7 +2358,9 @@ static int ixgbe_set_phys_id(struct net_device *netdev, } static int ixgbe_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); @@ -2412,7 +2414,9 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) } static int ixgbe_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_q_vector *q_vector; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 14aea40da50f..24e06ba6f5e9 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -10247,7 +10247,7 @@ static const struct net_device_ops ixgbe_netdev_ops = { .ndo_set_tx_maxrate = ixgbe_tx_maxrate, .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, - .ndo_do_ioctl = ixgbe_ioctl, + .ndo_eth_ioctl = ixgbe_ioctl, .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, diff --git a/drivers/net/ethernet/intel/ixgbevf/ethtool.c b/drivers/net/ethernet/intel/ixgbevf/ethtool.c index e49fb1cd9a99..8380f905e708 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ethtool.c +++ b/drivers/net/ethernet/intel/ixgbevf/ethtool.c @@ -787,7 +787,9 @@ static int ixgbevf_nway_reset(struct net_device *netdev) } static int ixgbevf_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); @@ -811,7 +813,9 @@ static int ixgbevf_get_coalesce(struct net_device *netdev, } static int ixgbevf_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ixgbevf_adapter *adapter = netdev_priv(netdev); struct ixgbevf_q_vector *q_vector; diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c index f1b9284e0bea..1bdc4f23e1e5 100644 
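To make the idleSlope conversion in igc_tsn_enable_offload() above concrete: with equation E4, an idleslope of 20000 kbps (20 Mbps reserved, as tc-cbs would pass it) maps to DIV_ROUND_UP(20000 * 61036, 2500000) = 489, which the ~40.96 kbps register granularity rounds back up to roughly 20029 kbps. A small host-side sketch of the same arithmetic (hypothetical helper, not part of the driver):

#include <stdio.h>

/* Mirrors the TQAVCC idleSlope conversion used by the driver:
 * one register unit is 2.5 Gbps / 61036 ~= 40.96 kbps.
 */
static unsigned int cbs_value(unsigned long long idleslope_kbps)
{
	return (idleslope_kbps * 61036ULL + 2500000 - 1) / 2500000;
}

int main(void)
{
	unsigned long long idleslope = 20000;	/* example: 20 Mbps */
	unsigned int val = cbs_value(idleslope);

	printf("idleslope %llu kbps -> TQAVCC idleSlope field %u (~%llu kbps)\n",
	       idleslope, val, val * 2500000ULL / 61036);
	return 0;
}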
--- a/drivers/net/ethernet/jme.c +++ b/drivers/net/ethernet/jme.c @@ -734,17 +734,17 @@ jme_make_new_rx_buf(struct jme_adapter *jme, int i) if (unlikely(!skb)) return -ENOMEM; - mapping = pci_map_page(jme->pdev, virt_to_page(skb->data), + mapping = dma_map_page(&jme->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), skb_tailroom(skb), - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(jme->pdev, mapping))) { + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&jme->pdev->dev, mapping))) { dev_kfree_skb(skb); return -ENOMEM; } if (likely(rxbi->mapping)) - pci_unmap_page(jme->pdev, rxbi->mapping, - rxbi->len, PCI_DMA_FROMDEVICE); + dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, + DMA_FROM_DEVICE); rxbi->skb = skb; rxbi->len = skb_tailroom(skb); @@ -760,10 +760,8 @@ jme_free_rx_buf(struct jme_adapter *jme, int i) rxbi += i; if (rxbi->skb) { - pci_unmap_page(jme->pdev, - rxbi->mapping, - rxbi->len, - PCI_DMA_FROMDEVICE); + dma_unmap_page(&jme->pdev->dev, rxbi->mapping, rxbi->len, + DMA_FROM_DEVICE); dev_kfree_skb(rxbi->skb); rxbi->skb = NULL; rxbi->mapping = 0; @@ -1005,16 +1003,12 @@ jme_alloc_and_feed_skb(struct jme_adapter *jme, int idx) rxbi += idx; skb = rxbi->skb; - pci_dma_sync_single_for_cpu(jme->pdev, - rxbi->mapping, - rxbi->len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_cpu(&jme->pdev->dev, rxbi->mapping, rxbi->len, + DMA_FROM_DEVICE); if (unlikely(jme_make_new_rx_buf(jme, idx))) { - pci_dma_sync_single_for_device(jme->pdev, - rxbi->mapping, - rxbi->len, - PCI_DMA_FROMDEVICE); + dma_sync_single_for_device(&jme->pdev->dev, rxbi->mapping, + rxbi->len, DMA_FROM_DEVICE); ++(NET_STAT(jme).rx_dropped); } else { @@ -1453,10 +1447,9 @@ static void jme_tx_clean_tasklet(struct tasklet_struct *t) ttxbi = txbi + ((i + j) & (mask)); txdesc[(i + j) & (mask)].dw[0] = 0; - pci_unmap_page(jme->pdev, - ttxbi->mapping, - ttxbi->len, - PCI_DMA_TODEVICE); + dma_unmap_page(&jme->pdev->dev, + ttxbi->mapping, ttxbi->len, + DMA_TO_DEVICE); ttxbi->mapping = 0; ttxbi->len = 0; @@ -1966,19 +1959,13 @@ jme_fill_tx_map(struct pci_dev *pdev, { dma_addr_t dmaaddr; - dmaaddr = pci_map_page(pdev, - page, - page_offset, - len, - PCI_DMA_TODEVICE); + dmaaddr = dma_map_page(&pdev->dev, page, page_offset, len, + DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(pdev, dmaaddr))) + if (unlikely(dma_mapping_error(&pdev->dev, dmaaddr))) return -EINVAL; - pci_dma_sync_single_for_device(pdev, - dmaaddr, - len, - PCI_DMA_TODEVICE); + dma_sync_single_for_device(&pdev->dev, dmaaddr, len, DMA_TO_DEVICE); txdesc->dw[0] = 0; txdesc->dw[1] = 0; @@ -2003,10 +1990,8 @@ static void jme_drop_tx_map(struct jme_adapter *jme, int startidx, int count) for (j = 0 ; j < count ; j++) { ctxbi = txbi + ((startidx + j + 2) & (mask)); - pci_unmap_page(jme->pdev, - ctxbi->mapping, - ctxbi->len, - PCI_DMA_TODEVICE); + dma_unmap_page(&jme->pdev->dev, ctxbi->mapping, ctxbi->len, + DMA_TO_DEVICE); ctxbi->mapping = 0; ctxbi->len = 0; @@ -2400,8 +2385,10 @@ jme_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p) mdio_memcpy(jme, p32, JME_PHY_REG_NR); } -static int -jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +static int jme_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct jme_adapter *jme = netdev_priv(netdev); @@ -2437,8 +2424,10 @@ jme_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) return 0; } -static int 
-jme_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecmd) +static int jme_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct jme_adapter *jme = netdev_priv(netdev); struct dynpcc_info *dpi = &(jme->dpi); @@ -2859,18 +2848,15 @@ static int jme_pci_dma64(struct pci_dev *pdev) { if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) - if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) - return 1; + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) + return 1; if (pdev->device == PCI_DEVICE_ID_JMICRON_JMC250 && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(40))) - if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40))) - return 1; + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(40))) + return 1; - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) - if (!pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) - return 0; + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) + return 0; return -1; } @@ -2901,7 +2887,7 @@ static const struct net_device_ops jme_netdev_ops = { .ndo_open = jme_open, .ndo_stop = jme_close, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = jme_ioctl, + .ndo_eth_ioctl = jme_ioctl, .ndo_start_xmit = jme_start_xmit, .ndo_set_mac_address = jme_set_macaddr, .ndo_set_rx_mode = jme_set_multi, diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index b30a45725374..3e9f324f1061 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1272,7 +1272,7 @@ static const struct net_device_ops korina_netdev_ops = { .ndo_start_xmit = korina_send_packet, .ndo_set_rx_mode = korina_multicast_list, .ndo_tx_timeout = korina_tx_timeout, - .ndo_do_ioctl = korina_ioctl, + .ndo_eth_ioctl = korina_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 2d0c52f7106b..62f8c5212182 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -609,7 +609,7 @@ static const struct net_device_ops ltq_eth_netdev_ops = { .ndo_stop = ltq_etop_stop, .ndo_start_xmit = ltq_etop_tx, .ndo_change_mtu = ltq_etop_change_mtu, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_set_mac_address = ltq_etop_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = ltq_etop_set_multicast_list, diff --git a/drivers/net/ethernet/litex/Kconfig b/drivers/net/ethernet/litex/Kconfig new file mode 100644 index 000000000000..63bf01d28f0c --- /dev/null +++ b/drivers/net/ethernet/litex/Kconfig @@ -0,0 +1,28 @@ +# +# LiteX device configuration +# + +config NET_VENDOR_LITEX + bool "LiteX devices" + default y + help + If you have a network (Ethernet) card belonging to this class, say Y. + + Note that the answer to this question doesn't directly affect the + kernel: saying N will just cause the configurator to skip all + the questions about LiteX devices. If you say Y, you will be asked + for your specific card in the following questions. + +if NET_VENDOR_LITEX + +config LITEX_LITEETH + tristate "LiteX Ethernet support" + depends on OF_NET + help + If you wish to compile a kernel for hardware with a LiteX LiteEth + device then you should answer Y to this. + + LiteX is a soft system-on-chip that targets FPGAs. 
LiteETH is a basic + network device that is commonly used in LiteX designs. + +endif # NET_VENDOR_LITEX diff --git a/drivers/net/ethernet/litex/Makefile b/drivers/net/ethernet/litex/Makefile new file mode 100644 index 000000000000..9343b73b8e49 --- /dev/null +++ b/drivers/net/ethernet/litex/Makefile @@ -0,0 +1,5 @@ +# +# Makefile for the LiteX network device drivers. +# + +obj-$(CONFIG_LITEX_LITEETH) += litex_liteeth.o diff --git a/drivers/net/ethernet/litex/litex_liteeth.c b/drivers/net/ethernet/litex/litex_liteeth.c new file mode 100644 index 000000000000..a9bdbf0dcfe1 --- /dev/null +++ b/drivers/net/ethernet/litex/litex_liteeth.c @@ -0,0 +1,314 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * LiteX Liteeth Ethernet + * + * Copyright 2017 Joel Stanley <joel@jms.id.au> + * + */ + +#include <linux/etherdevice.h> +#include <linux/interrupt.h> +#include <linux/litex.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/platform_device.h> + +#define LITEETH_WRITER_SLOT 0x00 +#define LITEETH_WRITER_LENGTH 0x04 +#define LITEETH_WRITER_ERRORS 0x08 +#define LITEETH_WRITER_EV_STATUS 0x0C +#define LITEETH_WRITER_EV_PENDING 0x10 +#define LITEETH_WRITER_EV_ENABLE 0x14 +#define LITEETH_READER_START 0x18 +#define LITEETH_READER_READY 0x1C +#define LITEETH_READER_LEVEL 0x20 +#define LITEETH_READER_SLOT 0x24 +#define LITEETH_READER_LENGTH 0x28 +#define LITEETH_READER_EV_STATUS 0x2C +#define LITEETH_READER_EV_PENDING 0x30 +#define LITEETH_READER_EV_ENABLE 0x34 +#define LITEETH_PREAMBLE_CRC 0x38 +#define LITEETH_PREAMBLE_ERRORS 0x3C +#define LITEETH_CRC_ERRORS 0x40 + +#define LITEETH_PHY_CRG_RESET 0x00 +#define LITEETH_MDIO_W 0x04 +#define LITEETH_MDIO_R 0x0C + +#define DRV_NAME "liteeth" + +struct liteeth { + void __iomem *base; + struct net_device *netdev; + struct device *dev; + u32 slot_size; + + /* Tx */ + u32 tx_slot; + u32 num_tx_slots; + void __iomem *tx_base; + + /* Rx */ + u32 rx_slot; + u32 num_rx_slots; + void __iomem *rx_base; +}; + +static int liteeth_rx(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + struct sk_buff *skb; + unsigned char *data; + u8 rx_slot; + int len; + + rx_slot = litex_read8(priv->base + LITEETH_WRITER_SLOT); + len = litex_read32(priv->base + LITEETH_WRITER_LENGTH); + + if (len == 0 || len > 2048) + goto rx_drop; + + skb = netdev_alloc_skb_ip_align(netdev, len); + if (!skb) { + netdev_err(netdev, "couldn't get memory\n"); + goto rx_drop; + } + + data = skb_put(skb, len); + memcpy_fromio(data, priv->rx_base + rx_slot * priv->slot_size, len); + skb->protocol = eth_type_trans(skb, netdev); + + netdev->stats.rx_packets++; + netdev->stats.rx_bytes += len; + + return netif_rx(skb); + +rx_drop: + netdev->stats.rx_dropped++; + netdev->stats.rx_errors++; + + return NET_RX_DROP; +} + +static irqreturn_t liteeth_interrupt(int irq, void *dev_id) +{ + struct net_device *netdev = dev_id; + struct liteeth *priv = netdev_priv(netdev); + u8 reg; + + reg = litex_read8(priv->base + LITEETH_READER_EV_PENDING); + if (reg) { + if (netif_queue_stopped(netdev)) + netif_wake_queue(netdev); + litex_write8(priv->base + LITEETH_READER_EV_PENDING, reg); + } + + reg = litex_read8(priv->base + LITEETH_WRITER_EV_PENDING); + if (reg) { + liteeth_rx(netdev); + litex_write8(priv->base + LITEETH_WRITER_EV_PENDING, reg); + } + + return IRQ_HANDLED; +} + +static int liteeth_open(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + int err; + + /* Clear pending events */ + litex_write8(priv->base + LITEETH_WRITER_EV_PENDING, 1); 
+ litex_write8(priv->base + LITEETH_READER_EV_PENDING, 1); + + err = request_irq(netdev->irq, liteeth_interrupt, 0, netdev->name, netdev); + if (err) { + netdev_err(netdev, "failed to request irq %d\n", netdev->irq); + return err; + } + + /* Enable IRQs */ + litex_write8(priv->base + LITEETH_WRITER_EV_ENABLE, 1); + litex_write8(priv->base + LITEETH_READER_EV_ENABLE, 1); + + netif_carrier_on(netdev); + netif_start_queue(netdev); + + return 0; +} + +static int liteeth_stop(struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + + netif_stop_queue(netdev); + netif_carrier_off(netdev); + + litex_write8(priv->base + LITEETH_WRITER_EV_ENABLE, 0); + litex_write8(priv->base + LITEETH_READER_EV_ENABLE, 0); + + free_irq(netdev->irq, netdev); + + return 0; +} + +static int liteeth_start_xmit(struct sk_buff *skb, struct net_device *netdev) +{ + struct liteeth *priv = netdev_priv(netdev); + void __iomem *txbuffer; + + if (!litex_read8(priv->base + LITEETH_READER_READY)) { + if (net_ratelimit()) + netdev_err(netdev, "LITEETH_READER_READY not ready\n"); + + netif_stop_queue(netdev); + + return NETDEV_TX_BUSY; + } + + /* Reject oversize packets */ + if (unlikely(skb->len > priv->slot_size)) { + if (net_ratelimit()) + netdev_err(netdev, "tx packet too big\n"); + + dev_kfree_skb_any(skb); + netdev->stats.tx_dropped++; + netdev->stats.tx_errors++; + + return NETDEV_TX_OK; + } + + txbuffer = priv->tx_base + priv->tx_slot * priv->slot_size; + memcpy_toio(txbuffer, skb->data, skb->len); + litex_write8(priv->base + LITEETH_READER_SLOT, priv->tx_slot); + litex_write16(priv->base + LITEETH_READER_LENGTH, skb->len); + litex_write8(priv->base + LITEETH_READER_START, 1); + + netdev->stats.tx_bytes += skb->len; + netdev->stats.tx_packets++; + + priv->tx_slot = (priv->tx_slot + 1) % priv->num_tx_slots; + dev_kfree_skb_any(skb); + + return NETDEV_TX_OK; +} + +static const struct net_device_ops liteeth_netdev_ops = { + .ndo_open = liteeth_open, + .ndo_stop = liteeth_stop, + .ndo_start_xmit = liteeth_start_xmit, +}; + +static void liteeth_setup_slots(struct liteeth *priv) +{ + struct device_node *np = priv->dev->of_node; + int err; + + err = of_property_read_u32(np, "litex,rx-slots", &priv->num_rx_slots); + if (err) { + dev_dbg(priv->dev, "unable to get litex,rx-slots, using 2\n"); + priv->num_rx_slots = 2; + } + + err = of_property_read_u32(np, "litex,tx-slots", &priv->num_tx_slots); + if (err) { + dev_dbg(priv->dev, "unable to get litex,tx-slots, using 2\n"); + priv->num_tx_slots = 2; + } + + err = of_property_read_u32(np, "litex,slot-size", &priv->slot_size); + if (err) { + dev_dbg(priv->dev, "unable to get litex,slot-size, using 0x800\n"); + priv->slot_size = 0x800; + } +} + +static int liteeth_probe(struct platform_device *pdev) +{ + struct net_device *netdev; + void __iomem *buf_base; + struct liteeth *priv; + int irq, err; + + netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv)); + if (!netdev) + return -ENOMEM; + + SET_NETDEV_DEV(netdev, &pdev->dev); + platform_set_drvdata(pdev, netdev); + + priv = netdev_priv(netdev); + priv->netdev = netdev; + priv->dev = &pdev->dev; + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "Failed to get IRQ %d\n", irq); + return irq; + } + netdev->irq = irq; + + priv->base = devm_platform_ioremap_resource_byname(pdev, "mac"); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + buf_base = devm_platform_ioremap_resource_byname(pdev, "buffer"); + if (IS_ERR(buf_base)) + return PTR_ERR(buf_base); + + liteeth_setup_slots(priv); + + 
/* Rx slots */ + priv->rx_base = buf_base; + priv->rx_slot = 0; + + /* Tx slots come after Rx slots */ + priv->tx_base = buf_base + priv->num_rx_slots * priv->slot_size; + priv->tx_slot = 0; + + err = of_get_mac_address(pdev->dev.of_node, netdev->dev_addr); + if (err) + eth_hw_addr_random(netdev); + + netdev->netdev_ops = &liteeth_netdev_ops; + + err = register_netdev(netdev); + if (err) { + dev_err(&pdev->dev, "Failed to register netdev %d\n", err); + return err; + } + + netdev_info(netdev, "irq %d slots: tx %d rx %d size %d\n", + netdev->irq, priv->num_tx_slots, priv->num_rx_slots, priv->slot_size); + + return 0; +} + +static int liteeth_remove(struct platform_device *pdev) +{ + struct net_device *netdev = platform_get_drvdata(pdev); + + unregister_netdev(netdev); + free_netdev(netdev); + + return 0; +} + +static const struct of_device_id liteeth_of_match[] = { + { .compatible = "litex,liteeth" }, + { } +}; +MODULE_DEVICE_TABLE(of, liteeth_of_match); + +static struct platform_driver liteeth_driver = { + .probe = liteeth_probe, + .remove = liteeth_remove, + .driver = { + .name = DRV_NAME, + .of_match_table = liteeth_of_match, + }, +}; +module_platform_driver(liteeth_driver); + +MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index d207bfcaf31d..28d5ad296646 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1611,8 +1611,10 @@ static void mv643xx_eth_get_drvinfo(struct net_device *dev, strlcpy(drvinfo->bus_info, "platform", sizeof(drvinfo->bus_info)); } -static int -mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int mv643xx_eth_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -1622,8 +1624,10 @@ mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) return 0; } -static int -mv643xx_eth_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int mv643xx_eth_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mv643xx_eth_private *mp = netdev_priv(dev); @@ -3060,7 +3064,7 @@ static const struct net_device_ops mv643xx_eth_netdev_ops = { .ndo_set_rx_mode = mv643xx_eth_set_rx_mode, .ndo_set_mac_address = mv643xx_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mv643xx_eth_ioctl, + .ndo_eth_ioctl = mv643xx_eth_ioctl, .ndo_change_mtu = mv643xx_eth_change_mtu, .ndo_set_features = mv643xx_eth_set_features, .ndo_tx_timeout = mv643xx_eth_tx_timeout, diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 76a7777c746d..9d460a270601 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -105,7 +105,7 @@ #define MVNETA_VLAN_PRIO_TO_RXQ 0x2440 #define MVNETA_VLAN_PRIO_RXQ_MAP(prio, rxq) ((rxq) << ((prio) * 3)) #define MVNETA_PORT_STATUS 0x2444 -#define MVNETA_TX_IN_PRGRS BIT(1) +#define MVNETA_TX_IN_PRGRS BIT(0) #define MVNETA_TX_FIFO_EMPTY BIT(8) #define MVNETA_RX_MIN_FRAME_SIZE 0x247c /* Only exists on Armada XP and Armada 370 */ @@ -2327,7 +2327,7 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, if (!skb) return ERR_PTR(-ENOMEM); - 
skb_mark_for_recycle(skb, virt_to_page(xdp->data), pool); + skb_mark_for_recycle(skb); skb_reserve(skb, xdp->data - xdp->data_hard_start); skb_put(skb, xdp->data_end - xdp->data); @@ -2339,10 +2339,6 @@ mvneta_swbm_build_skb(struct mvneta_port *pp, struct page_pool *pool, skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, skb_frag_page(frag), skb_frag_off(frag), skb_frag_size(frag), PAGE_SIZE); - /* We don't need to reset pp_recycle here. It's already set, so - * just mark fragments for recycling. - */ - page_pool_store_mem_info(skb_frag_page(frag), pool); } return skb; @@ -2666,7 +2662,7 @@ static int mvneta_tx_tso(struct sk_buff *skb, struct net_device *dev, return 0; if (skb_headlen(skb) < (skb_transport_offset(skb) + tcp_hdrlen(skb))) { - pr_info("*** Is this even possible???!?!?\n"); + pr_info("*** Is this even possible?\n"); return 0; } @@ -3832,12 +3828,20 @@ static void mvneta_validate(struct phylink_config *config, struct mvneta_port *pp = netdev_priv(ndev); __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; - /* We only support QSGMII, SGMII, 802.3z and RGMII modes */ - if (state->interface != PHY_INTERFACE_MODE_NA && - state->interface != PHY_INTERFACE_MODE_QSGMII && - state->interface != PHY_INTERFACE_MODE_SGMII && - !phy_interface_mode_is_8023z(state->interface) && - !phy_interface_mode_is_rgmii(state->interface)) { + /* We only support QSGMII, SGMII, 802.3z and RGMII modes. + * When in 802.3z mode, we must have AN enabled: + * "Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1." + */ + if (phy_interface_mode_is_8023z(state->interface)) { + if (!phylink_test(state->advertising, Autoneg)) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + } else if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != PHY_INTERFACE_MODE_QSGMII && + state->interface != PHY_INTERFACE_MODE_SGMII && + !phy_interface_mode_is_rgmii(state->interface)) { bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); return; } @@ -4496,8 +4500,11 @@ static int mvneta_ethtool_nway_reset(struct net_device *dev) } /* Set interrupt coalescing for ethtools */ -static int mvneta_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvneta_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); int queue; @@ -4520,8 +4527,11 @@ static int mvneta_ethtool_set_coalesce(struct net_device *dev, } /* get coalescing for ethtools */ -static int mvneta_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvneta_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvneta_port *pp = netdev_priv(dev); @@ -4986,7 +4996,7 @@ static const struct net_device_ops mvneta_netdev_ops = { .ndo_change_mtu = mvneta_change_mtu, .ndo_fix_features = mvneta_fix_features, .ndo_get_stats64 = mvneta_get_stats64, - .ndo_do_ioctl = mvneta_ioctl, + .ndo_eth_ioctl = mvneta_ioctl, .ndo_bpf = mvneta_xdp, .ndo_xdp_xmit = mvneta_xdp_xmit, .ndo_setup_tc = mvneta_setup_tc, diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 3229bafa2a2c..d5c92e43f89e 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ 
-3995,7 +3995,7 @@ static int mvpp2_rx(struct mvpp2_port *port, struct napi_struct *napi, } if (pp) - skb_mark_for_recycle(skb, page, pp); + skb_mark_for_recycle(skb); else dma_unmap_single_attrs(dev->dev.parent, dma_addr, bm_pool->buf_size, DMA_FROM_DEVICE, @@ -5367,8 +5367,11 @@ static int mvpp2_ethtool_nway_reset(struct net_device *dev) } /* Set interrupt coalescing for ethtools */ -static int mvpp2_ethtool_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvpp2_ethtool_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); int queue; @@ -5400,8 +5403,11 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev, } /* get coalescing for ethtools */ -static int mvpp2_ethtool_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *c) +static int +mvpp2_ethtool_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mvpp2_port *port = netdev_priv(dev); @@ -5702,7 +5708,7 @@ static const struct net_device_ops mvpp2_netdev_ops = { .ndo_set_mac_address = mvpp2_set_mac_address, .ndo_change_mtu = mvpp2_change_mtu, .ndo_get_stats64 = mvpp2_get_stats64, - .ndo_do_ioctl = mvpp2_ioctl, + .ndo_eth_ioctl = mvpp2_ioctl, .ndo_vlan_rx_add_vid = mvpp2_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mvpp2_vlan_rx_kill_vid, .ndo_set_features = mvpp2_set_features, @@ -6269,6 +6275,15 @@ static void mvpp2_phylink_validate(struct phylink_config *config, if (!mvpp2_port_supports_rgmii(port)) goto empty_set; break; + case PHY_INTERFACE_MODE_1000BASEX: + case PHY_INTERFACE_MODE_2500BASEX: + /* When in 802.3z mode, we must have AN enabled: + * Bit 2 Field InBandAnEn In-band Auto-Negotiation enable. ... + * When <PortType> = 1 (1000BASE-X) this field must be set to 1. + */ + if (!phylink_test(state->advertising, Autoneg)) + goto empty_set; + break; default: break; } diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index 16caa02095fe..3f982ccf2c85 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only # -# Marvell OcteonTX2 drivers configuration +# Marvell RVU Network drivers configuration # config OCTEONTX2_MBOX @@ -12,6 +12,7 @@ config OCTEONTX2_AF select NET_DEVLINK depends on (64BIT && COMPILE_TEST) || ARM64 depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Marvell's OcteonTX2 Resource Virtualization Unit's admin function manager which manages all RVU HW resources @@ -32,6 +33,7 @@ config OCTEONTX2_PF select OCTEONTX2_MBOX depends on (64BIT && COMPILE_TEST) || ARM64 depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL help This driver supports Marvell's OcteonTX2 NIC physical function. 
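A recurring change throughout this section (jme, mv643xx_eth, mvneta, mvpp2, ixgbe, ixgbevf, igc and others) is the extended ethtool coalesce callback signature, which now carries a kernel_ethtool_coalesce and a netlink extack. A skeleton of the new form for a hypothetical driver "foo" (sketch only; drivers that don't need the extra arguments can simply ignore them):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

static int foo_get_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	/* report current device state; kernel_coal/extack are only needed
	 * for CQE-mode parameters and netlink error reporting
	 */
	ec->rx_coalesce_usecs = 50;	/* hypothetical value */
	return 0;
}

static int foo_set_coalesce(struct net_device *netdev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	if (ec->rx_coalesce_usecs > 1000) {
		NL_SET_ERR_MSG(extack, "rx-usecs out of range");
		return -EINVAL;
	}
	/* program the device here */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce = foo_get_coalesce,
	.set_coalesce = foo_set_coalesce,
};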
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index cc8ac36cf687..7f4a4ca9af78 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Marvell's OcteonTX2 RVU Admin Function driver +# Makefile for Marvell's RVU Admin Function driver # ccflags-y += -I$(src) @@ -10,4 +10,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o \ + rvu_sdp.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 544c96c8fe1d..7f3d01059e19 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/acpi.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 237ba2b56210..ab1e4abdea38 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 CGX driver +/* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef CGX_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index aa4e42f78f13..f72ec0e2506f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 CGX driver +/* Marvell OcteonTx2 CGX driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef __CGX_FW_INTF_H__ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index 47f5ed006a93..d9bea13f15b8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -1,11 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. + * Copyright (C) 2018 Marvell. 
*/ #ifndef COMMON_H @@ -64,8 +60,8 @@ static inline int qmem_alloc(struct device *dev, struct qmem **q, qmem->entry_sz = entry_sz; qmem->alloc_sz = (qsize * entry_sz) + OTX2_ALIGN; - qmem->base = dma_alloc_coherent(dev, qmem->alloc_sz, - &qmem->iova, GFP_KERNEL); + qmem->base = dma_alloc_attrs(dev, qmem->alloc_sz, &qmem->iova, + GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS); if (!qmem->base) return -ENOMEM; @@ -84,9 +80,10 @@ static inline void qmem_free(struct device *dev, struct qmem *qmem) return; if (qmem->base) - dma_free_coherent(dev, qmem->alloc_sz, - qmem->base - qmem->align, - qmem->iova - qmem->align); + dma_free_attrs(dev, qmem->alloc_sz, + qmem->base - qmem->align, + qmem->iova - qmem->align, + DMA_ATTR_FORCE_CONTIGUOUS); devm_kfree(dev, qmem); } @@ -146,10 +143,7 @@ enum nix_scheduler { #define TXSCH_RR_QTM_MAX ((1 << 24) - 1) #define TXSCH_TL1_DFLT_RR_QTM TXSCH_RR_QTM_MAX #define TXSCH_TL1_DFLT_RR_PRIO (0x1ull) -#define MAX_SCHED_WEIGHT 0xFF -#define DFLT_RR_WEIGHT 71 -#define DFLT_RR_QTM ((DFLT_RR_WEIGHT * TXSCH_RR_QTM_MAX) \ - / MAX_SCHED_WEIGHT) +#define CN10K_MAX_DWRR_WEIGHT 16384 /* Weight is 14bit on CN10K */ /* Min/Max packet sizes, excluding FCS */ #define NIC_HW_MIN_FRS 40 @@ -187,15 +181,16 @@ enum nix_scheduler { #define NIX_INTF_TYPE_CGX 0 #define NIX_INTF_TYPE_LBK 1 +#define NIX_INTF_TYPE_SDP 2 #define MAX_LMAC_PKIND 12 #define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) #define NIX_LINK_LBK(a) (12 + (a)) #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) #define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) -#define NIX_CHAN_SDP_CH_START (0x700ull) - -#define SDP_CHANNELS 256 +#define NIX_CHAN_SDP_CH_START (0x700ull) +#define NIX_CHAN_SDP_CHX(a) (NIX_CHAN_SDP_CH_START + (a)) +#define NIX_CHAN_SDP_NUM_CHANS 256 /* The mask is to extract lower 10-bits of channel number * which CPT will pass to X2P. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h index a8b7b1c7a1d5..c38306b3384a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/lmac_common.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. + * */ #ifndef LMAC_COMMON_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 0a37ca96aab8..2898931d5260 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -412,5 +409,5 @@ const char *otx2_mbox_id2name(u16 id) } EXPORT_SYMBOL(otx2_mbox_id2name); -MODULE_AUTHOR("Marvell International Ltd."); +MODULE_AUTHOR("Marvell."); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index f5ec39de026a..154877706a0e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef MBOX_H @@ -87,7 +84,7 @@ struct mbox_msghdr { #define OTX2_MBOX_REQ_SIG (0xdead) #define OTX2_MBOX_RSP_SIG (0xbeef) u16 sig; /* Signature, for validating corrupted msgs */ -#define OTX2_MBOX_VERSION (0x0007) +#define OTX2_MBOX_VERSION (0x0009) u16 ver; /* Version of msg's structure for this ID */ u16 next_msgoff; /* Offset of next msg within mailbox region */ int rc; /* Msg process'ed response code */ @@ -130,6 +127,7 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, M(READY, 0x001, ready, msg_req, ready_msg_rsp) \ M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \ M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \ +M(FREE_RSRC_CNT, 0x004, free_rsrc_cnt, msg_req, free_rsrcs_rsp) \ M(MSIX_OFFSET, 0x005, msix_offset, msg_req, msix_offset_rsp) \ M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ M(PTP_OP, 0x007, ptp_op, ptp_req, ptp_rsp) \ @@ -191,6 +189,9 @@ M(CPT_RD_WR_REGISTER, 0xA02, cpt_rd_wr_register, cpt_rd_wr_reg_msg, \ M(CPT_STATS, 0xA05, cpt_sts, cpt_sts_req, cpt_sts_rsp) \ M(CPT_RXC_TIME_CFG, 0xA06, cpt_rxc_time_cfg, cpt_rxc_time_cfg_req, \ msg_rsp) \ +/* SDP mbox IDs (range 0x1000 - 0x11FF) */ \ +M(SET_SDP_CHAN_INFO, 0x1000, set_sdp_chan_info, sdp_chan_info_msg, msg_rsp) \ +M(GET_SDP_CHAN_INFO, 0x1001, get_sdp_chan_info, msg_req, sdp_get_chan_info_msg) \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ npc_mcam_alloc_entry_rsp) \ @@ -243,7 +244,8 @@ M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \ M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \ nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \ -M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \ +M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, \ + nix_txschq_config) \ M(NIX_STATS_RST, 0x8007, nix_stats_rst, msg_req, msg_rsp) \ M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, \ nix_vtag_config_rsp) \ @@ -268,13 +270,15 @@ M(NIX_BP_ENABLE, 0x8016, nix_bp_enable, nix_bp_cfg_req, \ nix_bp_cfg_rsp) \ M(NIX_BP_DISABLE, 0x8017, nix_bp_disable, nix_bp_cfg_req, msg_rsp) \ M(NIX_GET_MAC_ADDR, 0x8018, nix_get_mac_addr, msg_req, nix_get_mac_addr_rsp) \ -M(NIX_CN10K_AQ_ENQ, 0x8019, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ +M(NIX_CN10K_AQ_ENQ, 0x801b, nix_cn10k_aq_enq, nix_cn10k_aq_enq_req, \ nix_cn10k_aq_enq_rsp) \ M(NIX_GET_HW_INFO, 0x801c, nix_get_hw_info, msg_req, nix_hw_info) \ M(NIX_BANDPROF_ALLOC, 0x801d, nix_bandprof_alloc, nix_bandprof_alloc_req, \ nix_bandprof_alloc_rsp) \ M(NIX_BANDPROF_FREE, 
0x801e, nix_bandprof_free, nix_bandprof_free_req, \ - msg_rsp) + msg_rsp) \ +M(NIX_BANDPROF_GET_HWINFO, 0x801f, nix_bandprof_get_hwinfo, msg_req, \ + nix_bandprof_get_hwinfo_rsp) /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ @@ -363,6 +367,25 @@ struct rsrc_detach { u8 cptlfs:1; }; +/* Number of resources available to the caller. + * In reply to MBOX_MSG_FREE_RSRC_CNT. + */ +struct free_rsrcs_rsp { + struct mbox_msghdr hdr; + u16 schq[NIX_TXSCH_LVL_CNT]; + u16 sso; + u16 tim; + u16 ssow; + u16 cpt; + u8 npa; + u8 nix; + u16 schq_nix1[NIX_TXSCH_LVL_CNT]; + u8 nix1; + u8 cpt1; + u8 ree0; + u8 ree1; +}; + #define MSIX_VECTOR_INVALID 0xFFFF #define MAX_RVU_BLKLF_CNT 256 @@ -370,16 +393,20 @@ struct msix_offset_rsp { struct mbox_msghdr hdr; u16 npa_msixoff; u16 nix_msixoff; - u8 sso; - u8 ssow; - u8 timlfs; - u8 cptlfs; + u16 sso; + u16 ssow; + u16 timlfs; + u16 cptlfs; u16 sso_msixoff[MAX_RVU_BLKLF_CNT]; u16 ssow_msixoff[MAX_RVU_BLKLF_CNT]; u16 timlf_msixoff[MAX_RVU_BLKLF_CNT]; u16 cptlf_msixoff[MAX_RVU_BLKLF_CNT]; - u8 cpt1_lfs; + u16 cpt1_lfs; + u16 ree0_lfs; + u16 ree1_lfs; u16 cpt1_lf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ree0_lf_msixoff[MAX_RVU_BLKLF_CNT]; + u16 ree1_lf_msixoff[MAX_RVU_BLKLF_CNT]; }; struct get_hw_cap_rsp { @@ -594,6 +621,7 @@ struct npa_lf_alloc_rsp { u32 stack_pg_ptrs; /* No of ptrs per stack page */ u32 stack_pg_bytes; /* Size of stack page */ u16 qints; /* NPA_AF_CONST::QINTS */ + u8 cache_lines; /*BATCH ALLOC DMA */ }; /* NPA AQ enqueue msg */ @@ -698,6 +726,9 @@ struct nix_lf_alloc_req { u16 sso_func; u64 rx_cfg; /* See NIX_AF_LF(0..127)_RX_CFG */ u64 way_mask; +#define NIX_LF_RSS_TAG_LSB_AS_ADDER BIT_ULL(0) +#define NIX_LF_LBK_BLK_SEL BIT_ULL(1) + u64 flags; }; struct nix_lf_alloc_rsp { @@ -717,6 +748,7 @@ struct nix_lf_alloc_rsp { u8 cgx_links; /* No. of CGX links present in HW */ u8 lbk_links; /* No. of LBK links present in HW */ u8 sdp_links; /* No. 
of SDP links present in HW */ + u8 tx_link; /* Transmit channel link number */ }; struct nix_lf_free_req { @@ -835,6 +867,7 @@ struct nix_txsch_free_req { struct nix_txschq_config { struct mbox_msghdr hdr; u8 lvl; /* SMQ/MDQ/TL4/TL3/TL2/TL1 */ + u8 read; #define TXSCHQ_IDX_SHIFT 16 #define TXSCHQ_IDX_MASK (BIT_ULL(10) - 1) #define TXSCHQ_IDX(reg, shift) (((reg) >> (shift)) & TXSCHQ_IDX_MASK) @@ -842,6 +875,8 @@ struct nix_txschq_config { #define MAX_REGS_PER_MBOX_MSG 20 u64 reg[MAX_REGS_PER_MBOX_MSG]; u64 regval[MAX_REGS_PER_MBOX_MSG]; + /* All 0's => overwrite with new value */ + u64 regval_mask[MAX_REGS_PER_MBOX_MSG]; }; struct nix_vtag_config { @@ -1032,8 +1067,12 @@ struct nix_bp_cfg_rsp { struct nix_hw_info { struct mbox_msghdr hdr; + u16 rsvs16; u16 max_mtu; u16 min_mtu; + u32 rpm_dwrr_mtu; + u32 sdp_dwrr_mtu; + u64 rsvd[16]; /* Add reserved fields for future expansion */ }; struct nix_bandprof_alloc_req { @@ -1061,6 +1100,12 @@ struct nix_bandprof_free_req { u16 prof_idx[BAND_PROF_NUM_LAYERS][MAX_BANDPROF_PER_PFFUNC]; }; +struct nix_bandprof_get_hwinfo_rsp { + struct mbox_msghdr hdr; + u16 prof_count[BAND_PROF_NUM_LAYERS]; + u32 policer_timeunit; +}; + /* NPC mbox message structs */ #define NPC_MCAM_ENTRY_INVALID 0xFFFF @@ -1074,6 +1119,13 @@ enum npc_af_status { NPC_MCAM_ALLOC_DENIED = -702, NPC_MCAM_ALLOC_FAILED = -703, NPC_MCAM_PERM_DENIED = -704, + NPC_FLOW_INTF_INVALID = -707, + NPC_FLOW_CHAN_INVALID = -708, + NPC_FLOW_NO_NIXLF = -709, + NPC_FLOW_NOT_SUPPORTED = -710, + NPC_FLOW_VF_PERM_DENIED = -711, + NPC_FLOW_VF_NOT_INIT = -712, + NPC_FLOW_VF_OVERLAP = -713, }; struct npc_mcam_alloc_entry_req { @@ -1328,6 +1380,10 @@ struct set_vf_perm { struct lmtst_tbl_setup_req { struct mbox_msghdr hdr; + u64 dis_sched_early_comp :1; + u64 sch_ena :1; + u64 dis_line_pref :1; + u64 ssow_pf_func :13; u16 base_pcifunc; u8 use_local_lmt_region; u64 lmt_iova; @@ -1422,4 +1478,34 @@ struct cpt_rxc_time_cfg_req { u16 active_limit; }; +struct sdp_node_info { + /* Node to which this PF belons to */ + u8 node_id; + u8 max_vfs; + u8 num_pf_rings; + u8 pf_srn; +#define SDP_MAX_VFS 128 + u8 vf_rings[SDP_MAX_VFS]; +}; + +struct sdp_chan_info_msg { + struct mbox_msghdr hdr; + struct sdp_node_info info; +}; + +struct sdp_get_chan_info_msg { + struct mbox_msghdr hdr; + u16 chan_base; + u16 num_chan; +}; + +/* CGX mailbox error codes + * Range 1101 - 1200. + */ +enum cgx_af_status { + LMAC_AF_ERR_INVALID_PARAM = -1101, + LMAC_AF_ERR_PF_NOT_MAPPED = -1102, + LMAC_AF_ERR_PERM_DENIED = -1103, +}; + #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index 243cf8070e77..3a819b24accc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef NPC_H @@ -172,6 +169,8 @@ enum key_fields { NPC_DMAC, NPC_SMAC, NPC_ETYPE, + NPC_VLAN_ETYPE_CTAG, /* 0x8100 */ + NPC_VLAN_ETYPE_STAG, /* 0x88A8 */ NPC_OUTER_VID, NPC_TOS, NPC_SIP_IPV4, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h index fee655cc7523..588822a0cf21 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc_profile.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef NPC_PROFILE_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c index 1ee37853f338..9b8e59f4c206 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 /* Marvell PTP driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. + * */ #include <linux/bitfield.h> @@ -19,12 +20,11 @@ #define PCI_SUBSYS_DEVID_OCTX2_98xx_PTP 0xB100 #define PCI_SUBSYS_DEVID_OCTX2_96XX_PTP 0xB200 #define PCI_SUBSYS_DEVID_OCTX2_95XX_PTP 0xB300 -#define PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP 0xB400 +#define PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP 0xB400 #define PCI_SUBSYS_DEVID_OCTX2_95MM_PTP 0xB500 -#define PCI_SUBSYS_DEVID_CN10K_A_PTP 0xB900 -#define PCI_SUBSYS_DEVID_CNF10K_A_PTP 0xBA00 -#define PCI_SUBSYS_DEVID_CNF10K_B_PTP 0xBC00 +#define PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP 0xB600 #define PCI_DEVID_OCTEONTX2_RST 0xA085 +#define PCI_DEVID_CN10K_PTP 0xA09E #define PCI_PTP_BAR_NO 0 #define PCI_RST_BAR_NO 0 @@ -39,6 +39,9 @@ #define RST_MUL_BITS GENMASK_ULL(38, 33) #define CLOCK_BASE_RATE 50000000ULL +static struct ptp *first_ptp_block; +static const struct pci_device_id ptp_id_table[]; + static u64 get_clock_rate(void) { u64 cfg, ret = CLOCK_BASE_RATE * 16; @@ -74,23 +77,14 @@ error: struct ptp *ptp_get(void) { - struct pci_dev *pdev; - struct ptp *ptp; + struct ptp *ptp = first_ptp_block; - /* If the PTP pci device is found on the system and ptp - * driver is bound to it then the PTP pci device is returned - * to the caller(rvu driver). - */ - pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, - PCI_DEVID_OCTEONTX2_PTP, NULL); - if (!pdev) + /* Check PTP block is present in hardware */ + if (!pci_dev_present(ptp_id_table)) return ERR_PTR(-ENODEV); - - ptp = pci_get_drvdata(pdev); + /* Check driver is bound to PTP block */ if (!ptp) ptp = ERR_PTR(-EPROBE_DEFER); - if (IS_ERR(ptp)) - pci_dev_put(pdev); return ptp; } @@ -190,6 +184,8 @@ static int ptp_probe(struct pci_dev *pdev, writeq(clock_comp, ptp->reg_base + PTP_CLOCK_COMP); pci_set_drvdata(pdev, ptp); + if (!first_ptp_block) + first_ptp_block = ptp; return 0; @@ -204,6 +200,9 @@ error: * `dev->driver_data`. 
*/ pci_set_drvdata(pdev, ERR_PTR(err)); + if (!first_ptp_block) + first_ptp_block = ERR_PTR(err); + return 0; } @@ -233,19 +232,14 @@ static const struct pci_device_id ptp_id_table[] = { PCI_SUBSYS_DEVID_OCTX2_95XX_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_OCTX2_LOKI_PTP) }, + PCI_SUBSYS_DEVID_OCTX2_95XXN_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, PCI_SUBSYS_DEVID_OCTX2_95MM_PTP) }, { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CN10K_A_PTP) }, - { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, - PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CNF10K_A_PTP) }, - { PCI_DEVICE_SUB(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_PTP, - PCI_VENDOR_ID_CAVIUM, - PCI_SUBSYS_DEVID_CNF10K_B_PTP) }, + PCI_SUBSYS_DEVID_OCTX2_95XXO_PTP) }, + { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_CN10K_PTP) }, { 0, } }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h index 878bc395d28f..76d404b24552 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/ptp.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* Marvell PTP driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. + * */ #ifndef PTP_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c index a91ccdc59403..07b0eafccad8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h index d32e74bd5964..f0b069442dcc 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rpm.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rpm.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RPM driver +/* Marvell CN10K RPM driver * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 5fe277e354f7..ce647e037f4d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -70,18 +67,21 @@ static void rvu_setup_hw_capabilities(struct rvu *rvu) hw->cap.nix_shaping = true; hw->cap.nix_tx_link_bp = true; hw->cap.nix_rx_multicast = true; + hw->cap.nix_shaper_toggle_wait = false; hw->rvu = rvu; - if (is_rvu_96xx_B0(rvu)) { + if (is_rvu_pre_96xx_C0(rvu)) { hw->cap.nix_fixed_txschq_mapping = true; hw->cap.nix_txsch_per_cgx_lmac = 4; hw->cap.nix_txsch_per_lbk_lmac = 132; hw->cap.nix_txsch_per_sdp_lmac = 76; hw->cap.nix_shaping = false; hw->cap.nix_tx_link_bp = false; - if (is_rvu_96xx_A0(rvu)) + if (is_rvu_96xx_A0(rvu) || is_rvu_95xx_A0(rvu)) hw->cap.nix_rx_multicast = false; } + if (!is_rvu_pre_96xx_C0(rvu)) + hw->cap.nix_shaper_toggle_wait = true; if (!is_rvu_otx2(rvu)) hw->cap.per_pf_mbox_regs = true; @@ -498,12 +498,15 @@ int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf) static void rvu_block_reset(struct rvu *rvu, int blkaddr, u64 rst_reg) { struct rvu_block *block = &rvu->hw->block[blkaddr]; + int err; if (!block->implemented) return; rvu_write64(rvu, blkaddr, rst_reg, BIT_ULL(0)); - rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); + err = rvu_poll_reg(rvu, blkaddr, rst_reg, BIT_ULL(63), true); + if (err) + dev_err(rvu->dev, "HW block:%d reset failed\n", blkaddr); } static void rvu_reset_all_blocks(struct rvu *rvu) @@ -924,16 +927,26 @@ static int rvu_setup_hw_resources(struct rvu *rvu) block->lfreset_reg = NPA_AF_LF_RST; sprintf(block->name, "NPA"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate NPA LF bitmap\n", __func__); return err; + } nix: err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX0); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate NIX0 LFs bitmap\n", __func__); return err; + } + err = rvu_setup_nix_hw_resource(rvu, BLKADDR_NIX1); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate NIX1 LFs bitmap\n", __func__); return err; + } /* Init SSO group's bitmap */ block = &hw->block[BLKADDR_SSO]; @@ -953,8 +966,11 @@ nix: block->lfreset_reg = SSO_AF_LF_HWGRP_RST; sprintf(block->name, "SSO GROUP"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate SSO LF bitmap\n", __func__); return err; + } ssow: /* Init SSO workslot's bitmap */ @@ -974,8 +990,11 @@ ssow: block->lfreset_reg = SSOW_AF_LF_HWS_RST; sprintf(block->name, "SSOWS"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate SSOW LF bitmap\n", __func__); return err; + } tim: /* Init TIM LF's bitmap */ @@ -996,35 +1015,53 @@ tim: block->lfreset_reg = TIM_AF_LF_RST; sprintf(block->name, "TIM"); err = rvu_alloc_bitmap(&block->lf); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate TIM LF bitmap\n", __func__); return err; + } cpt: err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT0); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate CPT0 LF bitmap\n", __func__); return err; + } err = rvu_setup_cpt_hw_resource(rvu, BLKADDR_CPT1); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to allocate CPT1 LF bitmap\n", __func__); return err; + } /* Allocate memory for PFVF data */ rvu->pf = devm_kcalloc(rvu->dev, hw->total_pfs, sizeof(struct rvu_pfvf), GFP_KERNEL); - if (!rvu->pf) + if (!rvu->pf) { + dev_err(rvu->dev, + "%s: Failed to allocate memory for PF's rvu_pfvf struct\n", __func__); return -ENOMEM; + } rvu->hwvf = devm_kcalloc(rvu->dev, hw->total_vfs, sizeof(struct rvu_pfvf), GFP_KERNEL); - if (!rvu->hwvf) 
+ if (!rvu->hwvf) { + dev_err(rvu->dev, + "%s: Failed to allocate memory for VF's rvu_pfvf struct\n", __func__); return -ENOMEM; + } mutex_init(&rvu->rsrc_lock); rvu_fwdata_init(rvu); err = rvu_setup_msix_resources(rvu); - if (err) + if (err) { + dev_err(rvu->dev, + "%s: Failed to setup MSIX resources\n", __func__); return err; + } for (blkid = 0; blkid < BLK_COUNT; blkid++) { block = &hw->block[blkid]; @@ -1050,25 +1087,39 @@ cpt: goto msix_err; err = rvu_npc_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize npc\n", __func__); goto npc_err; + } err = rvu_cgx_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize cgx\n", __func__); goto cgx_err; + } /* Assign MACs for CGX mapped functions */ rvu_setup_pfvf_macaddress(rvu); err = rvu_npa_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize npa\n", __func__); goto npa_err; + } rvu_get_lbk_bufsize(rvu); err = rvu_nix_init(rvu); - if (err) + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize nix\n", __func__); goto nix_err; + } + + err = rvu_sdp_init(rvu); + if (err) { + dev_err(rvu->dev, "%s: Failed to initialize sdp\n", __func__); + goto nix_err; + } rvu_program_channels(rvu); @@ -1322,9 +1373,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) int blkaddr = BLKADDR_NIX0, vf; struct rvu_pfvf *pf; + pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); + /* All CGX mapped PFs are set with assigned NIX block during init */ if (is_pf_cgxmapped(rvu, rvu_get_pf(pcifunc))) { - pf = rvu_get_pfvf(rvu, pcifunc & ~RVU_PFVF_FUNC_MASK); blkaddr = pf->nix_blkaddr; } else if (is_afvf(pcifunc)) { vf = pcifunc - 1; @@ -1337,6 +1389,10 @@ int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) blkaddr = BLKADDR_NIX0; } + /* if SDP1 then the blkaddr is NIX1 */ + if (is_sdp_pfvf(pcifunc) && pf->sdp_info->node_id == 1) + blkaddr = BLKADDR_NIX1; + switch (blkaddr) { case BLKADDR_NIX1: pfvf->nix_blkaddr = BLKADDR_NIX1; @@ -1737,6 +1793,99 @@ int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_free_rsrc_cnt(struct rvu *rvu, struct msg_req *req, + struct free_rsrcs_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + struct rvu_block *block; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + mutex_lock(&rvu->rsrc_lock); + + block = &hw->block[BLKADDR_NPA]; + rsp->npa = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_NIX0]; + rsp->nix = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_NIX1]; + rsp->nix1 = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_SSO]; + rsp->sso = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_SSOW]; + rsp->ssow = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_TIM]; + rsp->tim = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_CPT0]; + rsp->cpt = rvu_rsrc_free_count(&block->lf); + + block = &hw->block[BLKADDR_CPT1]; + rsp->cpt1 = rvu_rsrc_free_count(&block->lf); + + if (rvu->hw->cap.nix_fixed_txschq_mapping) { + rsp->schq[NIX_TXSCH_LVL_SMQ] = 1; + rsp->schq[NIX_TXSCH_LVL_TL4] = 1; + rsp->schq[NIX_TXSCH_LVL_TL3] = 1; + rsp->schq[NIX_TXSCH_LVL_TL2] = 1; + /* NIX1 */ + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + goto out; + rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = 1; + rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = 1; + } else { + nix_hw = get_nix_hw(hw, BLKADDR_NIX0); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + 
rsp->schq[NIX_TXSCH_LVL_SMQ] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; + rsp->schq[NIX_TXSCH_LVL_TL4] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; + rsp->schq[NIX_TXSCH_LVL_TL3] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + rsp->schq[NIX_TXSCH_LVL_TL2] = + rvu_rsrc_free_count(&txsch->schq); + + if (!is_block_implemented(rvu->hw, BLKADDR_NIX1)) + goto out; + + nix_hw = get_nix_hw(hw, BLKADDR_NIX1); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + rsp->schq_nix1[NIX_TXSCH_LVL_SMQ] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL4]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL4] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL3]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL3] = + rvu_rsrc_free_count(&txsch->schq); + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL2]; + rsp->schq_nix1[NIX_TXSCH_LVL_TL2] = + rvu_rsrc_free_count(&txsch->schq); + } + + rsp->schq_nix1[NIX_TXSCH_LVL_TL1] = 1; +out: + rsp->schq[NIX_TXSCH_LVL_TL1] = 1; + mutex_unlock(&rvu->rsrc_lock); + + return 0; +} + int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { @@ -2402,11 +2551,12 @@ static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) for (vf = 0; vf < numvfs; vf++) { if (!(intr & BIT_ULL(vf))) continue; - dev = vf + start_vf + rvu->hw->total_pfs; - queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); /* Clear and disable the interrupt */ rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); + + dev = vf + start_vf + rvu->hw->total_pfs; + queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); } } @@ -2422,14 +2572,14 @@ static irqreturn_t rvu_flr_intr_handler(int irq, void *rvu_irq) for (pf = 0; pf < rvu->hw->total_pfs; pf++) { if (intr & (1ULL << pf)) { - /* PF is already dead do only AF related operations */ - queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); /* clear interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, BIT_ULL(pf)); /* Disable the interrupt */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, BIT_ULL(pf)); + /* PF is already dead do only AF related operations */ + queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); } } @@ -2984,27 +3134,37 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, rvu->hw->total_pfs, rvu_afpf_mbox_handler, rvu_afpf_mbox_up_handler); - if (err) + if (err) { + dev_err(dev, "%s: Failed to initialize mbox\n", __func__); goto err_hwsetup; + } err = rvu_flr_init(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to initialize flr\n", __func__); goto err_mbox; + } err = rvu_register_interrupts(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to register interrupts\n", __func__); goto err_flr; + } err = rvu_register_dl(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to register devlink\n", __func__); goto err_irq; + } rvu_setup_rvum_blk_revid(rvu); /* Enable AF's VFs (if any) */ err = rvu_enable_sriov(rvu); - if (err) + if (err) { + dev_err(dev, "%s: Failed to enable sriov\n", __func__); goto err_dl; + } /* Initialize debugfs */ rvu_dbg_init(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 91503fb2762c..d38e5c980c30 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_H @@ -243,8 +240,11 @@ struct rvu_pfvf { u8 nix_blkaddr; /* BLKADDR_NIX0/1 assigned to this PF */ u8 nix_rx_intf; /* NIX0_RX/NIX1_RX interface to NPC */ u8 nix_tx_intf; /* NIX0_TX/NIX1_TX interface to NPC */ + u8 lbkid; /* NIX0/1 lbk link ID */ u64 lmt_base_addr; /* Preseving the pcifunc's lmtst base addr*/ + u64 lmt_map_ent_w1; /* Preseving the word1 of lmtst map table entry*/ unsigned long flags; + struct sdp_node_info *sdp_info; }; enum rvu_pfvf_flags { @@ -314,6 +314,7 @@ struct nix_hw { struct nix_lso lso; struct nix_txvlan txvlan; struct nix_ipolicer *ipolicer; + u64 *tx_credits; }; /* RVU block's capabilities or functionality, @@ -327,8 +328,10 @@ struct hw_cap { u16 nix_txsch_per_sdp_lmac; /* Max Q's transmitting to SDP LMAC */ bool nix_fixed_txschq_mapping; /* Schq mapping fixed or flexible */ bool nix_shaping; /* Is shaping and coloring supported */ + bool nix_shaper_toggle_wait; /* Shaping toggle needs poll/wait */ bool nix_tx_link_bp; /* Can link backpressure TL queues ? */ bool nix_rx_multicast; /* Rx packet replication support */ + bool nix_common_dwrr_mtu; /* Common DWRR MTU for quantum config */ bool per_pf_mbox_regs; /* PF mbox specified in per PF registers ? */ bool programmable_chans; /* Channels programmable ? */ bool ipolicer; @@ -355,6 +358,7 @@ struct rvu_hwinfo { u16 npc_counters; /* No of match stats counters */ u32 lbk_bufsize; /* FIFO size supported by LBK */ bool npc_ext_set; /* Extended register set */ + u64 npc_stat_ena; /* Match stats enable bit */ struct hw_cap cap; struct rvu_block block[BLK_COUNT]; /* Block info */ @@ -514,20 +518,34 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) } /* Silicon revisions */ +static inline bool is_rvu_pre_96xx_C0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + /* 96XX A0/B0, 95XX A0/A1/B0 chips */ + return ((pdev->revision == 0x00) || (pdev->revision == 0x01) || + (pdev->revision == 0x10) || (pdev->revision == 0x11) || + (pdev->revision == 0x14)); +} + static inline bool is_rvu_96xx_A0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; - return (pdev->revision == 0x00) && - (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); + return (pdev->revision == 0x00); } static inline bool is_rvu_96xx_B0(struct rvu *rvu) { struct pci_dev *pdev = rvu->pdev; - return ((pdev->revision == 0x00) || (pdev->revision == 0x01)) && - (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); + return (pdev->revision == 0x00) || (pdev->revision == 0x01); +} + +static inline bool is_rvu_95xx_A0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return (pdev->revision == 0x10) || (pdev->revision == 0x11); } /* REVID for PCIe devices. 
@@ -536,9 +554,10 @@ static inline bool is_rvu_96xx_B0(struct rvu *rvu) */ #define PCI_REVISION_ID_96XX 0x00 #define PCI_REVISION_ID_95XX 0x10 -#define PCI_REVISION_ID_LOKI 0x20 +#define PCI_REVISION_ID_95XXN 0x20 #define PCI_REVISION_ID_98XX 0x30 #define PCI_REVISION_ID_95XXMM 0x40 +#define PCI_REVISION_ID_95XXO 0xE0 static inline bool is_rvu_otx2(struct rvu *rvu) { @@ -547,8 +566,8 @@ static inline bool is_rvu_otx2(struct rvu *rvu) u8 midr = pdev->revision & 0xF0; return (midr == PCI_REVISION_ID_96XX || midr == PCI_REVISION_ID_95XX || - midr == PCI_REVISION_ID_LOKI || midr == PCI_REVISION_ID_98XX || - midr == PCI_REVISION_ID_95XXMM); + midr == PCI_REVISION_ID_95XXN || midr == PCI_REVISION_ID_98XX || + midr == PCI_REVISION_ID_95XXMM || midr == PCI_REVISION_ID_95XXO); } static inline u16 rvu_nix_chan_cgx(struct rvu *rvu, u8 cgxid, @@ -578,6 +597,16 @@ static inline u16 rvu_nix_chan_lbk(struct rvu *rvu, u8 lbkid, return rvu->hw->lbk_chan_base + lbkid * lbk_chans + chan; } +static inline u16 rvu_nix_chan_sdp(struct rvu *rvu, u8 chan) +{ + struct rvu_hwinfo *hw = rvu->hw; + + if (!hw->cap.programmable_chans) + return NIX_CHAN_SDP_CHX(chan); + + return hw->sdp_chan_base + chan; +} + static inline u16 rvu_nix_chan_cpt(struct rvu *rvu, u8 chan) { return rvu->hw->cpt_chan_base + chan; @@ -640,10 +669,17 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, int qsize, int inst_size, int res_size); void rvu_aq_free(struct rvu *rvu, struct admin_queue *aq); +/* SDP APIs */ +int rvu_sdp_init(struct rvu *rvu); +bool is_sdp_pfvf(u16 pcifunc); +bool is_sdp_pf(u16 pcifunc); +bool is_sdp_vf(u16 pcifunc); + /* CGX APIs */ static inline bool is_pf_cgxmapped(struct rvu *rvu, u8 pf) { - return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs); + return (pf >= PF_CGXMAP_BASE && pf <= rvu->cgx_mapped_pfs) && + !is_sdp_pf(pf << RVU_PFVF_PF_SHIFT); } static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) @@ -706,6 +742,8 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_rsp *aq_rsp, u16 pcifunc, u8 ctype, u32 qidx); int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); +u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu); +u32 convert_bytes_to_dwrr_mtu(u32 bytes); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -745,7 +783,6 @@ bool is_npc_intf_tx(u8 intf); bool is_npc_intf_rx(u8 intf); bool is_npc_interface_valid(struct rvu *rvu, u8 intf); int rvu_npc_get_tx_nibble_cfg(struct rvu *rvu, u64 nibble_ena); -int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel); int npc_flow_steering_init(struct rvu *rvu, int blkaddr); const char *npc_get_field_name(u8 hdr); int npc_get_bank(struct npc_mcam *mcam, int index); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index fe99ac4a4dd8..81e8ea9ee30e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/types.h> @@ -448,7 +445,7 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); @@ -507,7 +504,7 @@ static int rvu_lmac_get_stats(struct rvu *rvu, struct msg_req *req, void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -ENODEV; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); @@ -561,7 +558,7 @@ int rvu_mbox_handler_cgx_fec_stats(struct rvu *rvu, void *cgxd; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_idx, &lmac); cgxd = rvu_cgx_pdata(cgx_idx, rvu); @@ -888,7 +885,7 @@ int rvu_mbox_handler_cgx_get_phy_fec_stats(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; if (!is_pf_cgxmapped(rvu, pf)) - return -EPERM; + return LMAC_AF_ERR_PF_NOT_MAPPED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_get_phy_fec_stats(rvu_cgx_pdata(cgx_id, rvu), lmac_id); @@ -1046,7 +1043,7 @@ int rvu_mbox_handler_cgx_mac_addr_reset(struct rvu *rvu, struct msg_req *req, u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_lmac_addr_reset(cgx_id, lmac_id); @@ -1060,7 +1057,7 @@ int rvu_mbox_handler_cgx_mac_addr_update(struct rvu *rvu, u8 cgx_id, lmac_id; if (!is_cgx_config_permitted(rvu, req->hdr.pcifunc)) - return -EPERM; + return LMAC_AF_ERR_PERM_DENIED; rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); return cgx_lmac_addr_update(cgx_id, lmac_id, req->mac_addr, req->index); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c index 8d48b64485c6..46a41cfff575 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cn10k.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell RPM CN10K driver +/* Marvell RPM CN10K driver * * Copyright (C) 2020 Marvell. 
*/ @@ -49,6 +49,7 @@ static int lmtst_map_table_ops(struct rvu *rvu, u32 index, u64 *val, return 0; } +#define LMT_MAP_TBL_W1_OFF 8 static u32 rvu_get_lmtst_tbl_index(struct rvu *rvu, u16 pcifunc) { return ((rvu_get_pf(pcifunc) * rvu->hw->total_vfs) + @@ -82,10 +83,10 @@ static int rvu_get_lmtaddr(struct rvu *rvu, u16 pcifunc, dev_err(rvu->dev, "%s LMTLINE iova transulation failed err:%llx\n", __func__, val); return -EIO; } - /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT1[60:21] + /* PA[51:12] = RVU_AF_SMMU_TLN_FLIT0[57:18] * PA[11:0] = IOVA[11:0] */ - pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT1) >> 21; + pa = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_SMMU_TLN_FLIT0) >> 18; pa &= GENMASK_ULL(39, 0); *lmt_addr = (pa << 12) | (iova & 0xFFF); @@ -131,9 +132,11 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, struct lmtst_tbl_setup_req *req, struct msg_rsp *rsp) { - u64 lmt_addr, val; - u32 pri_tbl_idx; + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + u32 pri_tbl_idx, tbl_idx; + u64 lmt_addr; int err = 0; + u64 val; /* Check if PF_FUNC wants to use it's own local memory as LMTLINE * region, if so, convert that IOVA to physical address and @@ -170,7 +173,7 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, dev_err(rvu->dev, "Failed to read LMT map table: index 0x%x err %d\n", pri_tbl_idx, err); - return err; + goto error; } /* Update the base lmt addr of secondary with primary's base @@ -181,7 +184,53 @@ int rvu_mbox_handler_lmtst_tbl_setup(struct rvu *rvu, return err; } - return 0; + /* This mailbox can also be used to update word1 of APR_LMT_MAP_ENTRY_S + * like enabling scheduled LMTST, disable LMTLINE prefetch, disable + * early completion for ordered LMTST. + */ + if (req->sch_ena || req->dis_sched_early_comp || req->dis_line_pref) { + tbl_idx = rvu_get_lmtst_tbl_index(rvu, req->hdr.pcifunc); + err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, + &val, LMT_TBL_OP_READ); + if (err) { + dev_err(rvu->dev, + "Failed to read LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + goto error; + } + + /* Storing lmt map table entry word1 default value as this needs + * to be reverted in FLR. Also making sure this default value + * doesn't get overwritten on multiple calls to this mailbox. + */ + if (!pfvf->lmt_map_ent_w1) + pfvf->lmt_map_ent_w1 = val; + + /* Disable early completion for Ordered LMTSTs. */ + if (req->dis_sched_early_comp) + val |= (req->dis_sched_early_comp << + APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT); + /* Enable scheduled LMTST */ + if (req->sch_ena) + val |= (req->sch_ena << APR_LMT_MAP_ENT_SCH_ENA_SHIFT) | + req->ssow_pf_func; + /* Disables LMTLINE prefetch before receiving store data. */ + if (req->dis_line_pref) + val |= (req->dis_line_pref << + APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT); + + err = lmtst_map_table_ops(rvu, tbl_idx + LMT_MAP_TBL_W1_OFF, + &val, LMT_TBL_OP_WRITE); + if (err) { + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + goto error; + } + } + +error: + return err; } /* Resetting the lmtst map table to original base addresses */ @@ -194,27 +243,45 @@ void rvu_reset_lmt_map_tbl(struct rvu *rvu, u16 pcifunc) if (is_rvu_otx2(rvu)) return; - if (pfvf->lmt_base_addr) { + if (pfvf->lmt_base_addr || pfvf->lmt_map_ent_w1) { /* This corresponds to lmt map table index */ tbl_idx = rvu_get_lmtst_tbl_index(rvu, pcifunc); /* Reverting back original lmt base addr for respective * pcifunc. 
*/ - err = lmtst_map_table_ops(rvu, tbl_idx, &pfvf->lmt_base_addr, - LMT_TBL_OP_WRITE); - if (err) - dev_err(rvu->dev, - "Failed to update LMT map table: index 0x%x err %d\n", - tbl_idx, err); - pfvf->lmt_base_addr = 0; + if (pfvf->lmt_base_addr) { + err = lmtst_map_table_ops(rvu, tbl_idx, + &pfvf->lmt_base_addr, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx, err); + pfvf->lmt_base_addr = 0; + } + /* Reverting back to original word1 val of lmtst map table entry + * which underwent changes. + */ + if (pfvf->lmt_map_ent_w1) { + err = lmtst_map_table_ops(rvu, + tbl_idx + LMT_MAP_TBL_W1_OFF, + &pfvf->lmt_map_ent_w1, + LMT_TBL_OP_WRITE); + if (err) + dev_err(rvu->dev, + "Failed to update LMT map table: index 0x%x err %d\n", + tbl_idx + LMT_MAP_TBL_W1_OFF, err); + pfvf->lmt_map_ent_w1 = 0; + } } } int rvu_set_channels_base(struct rvu *rvu) { + u16 nr_lbk_chans, nr_sdp_chans, nr_cgx_chans, nr_cpt_chans; + u16 sdp_chan_base, cgx_chan_base, cpt_chan_base; struct rvu_hwinfo *hw = rvu->hw; - u16 cpt_chan_base; - u64 nix_const; + u64 nix_const, nix_const1; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); @@ -222,6 +289,7 @@ int rvu_set_channels_base(struct rvu *rvu) return blkaddr; nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); + nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); hw->cgx = (nix_const >> 12) & 0xFULL; hw->lmac_per_cgx = (nix_const >> 8) & 0xFULL; @@ -244,14 +312,24 @@ int rvu_set_channels_base(struct rvu *rvu) * channels such that all channel numbers are contiguous * leaving no holes. This way the new CPT channels can be * accommodated. The order of channel numbers assigned is - * LBK, SDP, CGX and CPT. + * LBK, SDP, CGX and CPT. Also the base channel number + * of a block must be multiple of number of channels + * of the block.
*/ - hw->sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * - ((nix_const >> 16) & 0xFFULL); - hw->cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * SDP_CHANNELS; + nr_lbk_chans = (nix_const >> 16) & 0xFFULL; + nr_sdp_chans = nix_const1 & 0xFFFULL; + nr_cgx_chans = nix_const & 0xFFULL; + nr_cpt_chans = (nix_const >> 32) & 0xFFFULL; + + sdp_chan_base = hw->lbk_chan_base + hw->lbk_links * nr_lbk_chans; + /* Round up base channel to multiple of number of channels */ + hw->sdp_chan_base = ALIGN(sdp_chan_base, nr_sdp_chans); + + cgx_chan_base = hw->sdp_chan_base + hw->sdp_links * nr_sdp_chans; + hw->cgx_chan_base = ALIGN(cgx_chan_base, nr_cgx_chans); - cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * - (nix_const & 0xFFULL); + cpt_chan_base = hw->cgx_chan_base + hw->cgx_links * nr_cgx_chans; + hw->cpt_chan_base = ALIGN(cpt_chan_base, nr_cpt_chans); /* Out of 4096 channels start CPT from 2048 so * that MSB for CPT channels is always set @@ -355,6 +433,7 @@ err_put: static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) { + u64 nix_const1 = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); u64 nix_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST); u16 cgx_chans, lbk_chans, sdp_chans, cpt_chans; struct rvu_hwinfo *hw = rvu->hw; @@ -364,7 +443,7 @@ static void __rvu_nix_set_channels(struct rvu *rvu, int blkaddr) cgx_chans = nix_const & 0xFFULL; lbk_chans = (nix_const >> 16) & 0xFFULL; - sdp_chans = SDP_CHANNELS; + sdp_chans = nix_const1 & 0xFFFULL; cpt_chans = (nix_const >> 32) & 0xFFFULL; start = hw->cgx_chan_base; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c index 89253f7bdadb..1f90a7403392 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cpt.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only -/* Copyright (C) 2020 Marvell. */ +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. + * + */ #include <linux/bitfield.h> #include <linux/pci.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 9b2dfbf90e51..9338765da048 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2019 Marvell International Ltd. + * Copyright (C) 2019 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifdef CONFIG_DEBUG_FS diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 2688186066d9..274d3abe30eb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Devlink +/* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. 
* @@ -1364,6 +1364,89 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_nix_health_reporters_destroy(rvu_dl); } +/* Devlink Params APIs */ +static int rvu_af_dl_dwrr_mtu_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + int dwrr_mtu = val.vu32; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + + if (!rvu->hw->cap.nix_common_dwrr_mtu) { + NL_SET_ERR_MSG_MOD(extack, + "Setting DWRR_MTU is not supported on this silicon"); + return -EOPNOTSUPP; + } + + if ((dwrr_mtu > 65536 || !is_power_of_2(dwrr_mtu)) && + (dwrr_mtu != 9728 && dwrr_mtu != 10240)) { + NL_SET_ERR_MSG_MOD(extack, + "Invalid, supported MTUs are 0,2,4,8.16,32,64....4K,8K,32K,64K and 9728, 10240"); + return -EINVAL; + } + + nix_hw = get_nix_hw(rvu->hw, BLKADDR_NIX0); + if (!nix_hw) + return -ENODEV; + + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + if (rvu_rsrc_free_count(&txsch->schq) != txsch->schq.max) { + NL_SET_ERR_MSG_MOD(extack, + "Changing DWRR MTU is not supported when there are active NIXLFs"); + NL_SET_ERR_MSG_MOD(extack, + "Make sure none of the PF/VF interfaces are initialized and retry"); + return -EOPNOTSUPP; + } + + return 0; +} + +static int rvu_af_dl_dwrr_mtu_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + u64 dwrr_mtu; + + dwrr_mtu = convert_bytes_to_dwrr_mtu(ctx->val.vu32); + rvu_write64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU, dwrr_mtu); + + return 0; +} + +static int rvu_af_dl_dwrr_mtu_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + u64 dwrr_mtu; + + if (!rvu->hw->cap.nix_common_dwrr_mtu) + return -EOPNOTSUPP; + + dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU); + ctx->val.vu32 = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + return 0; +} + +enum rvu_af_dl_param_id { + RVU_AF_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, +}; + +static const struct devlink_param rvu_af_dl_params[] = { + DEVLINK_PARAM_DRIVER(RVU_AF_DEVLINK_PARAM_ID_DWRR_MTU, + "dwrr_mtu", DEVLINK_PARAM_TYPE_U32, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + rvu_af_dl_dwrr_mtu_get, rvu_af_dl_dwrr_mtu_set, + rvu_af_dl_dwrr_mtu_validate), +}; + +/* Devlink switch mode */ static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) { struct rvu_devlink *rvu_dl = devlink_priv(devlink); @@ -1420,13 +1503,14 @@ int rvu_register_dl(struct rvu *rvu) struct devlink *dl; int err; - dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); + dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink), + rvu->dev); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); return -ENOMEM; } - err = devlink_register(dl, rvu->dev); + err = devlink_register(dl); if (err) { dev_err(rvu->dev, "devlink register failed with error %d\n", err); devlink_free(dl); @@ -1438,7 +1522,30 @@ int rvu_register_dl(struct rvu *rvu) rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; - return rvu_health_reporters_create(rvu); + err = rvu_health_reporters_create(rvu); + if (err) { + dev_err(rvu->dev, + "devlink health reporter creation failed with error %d\n", err); + goto err_dl_health; + } + + err = devlink_params_register(dl, rvu_af_dl_params, + ARRAY_SIZE(rvu_af_dl_params)); + if (err) { + dev_err(rvu->dev, + "devlink 
params register failed with error %d", err); + goto err_dl_health; + } + + devlink_params_publish(dl); + + return 0; + +err_dl_health: + rvu_health_reporters_destroy(rvu); + devlink_unregister(dl); + devlink_free(dl); + return err; } void rvu_unregister_dl(struct rvu *rvu) @@ -1449,6 +1556,8 @@ void rvu_unregister_dl(struct rvu *rvu) if (!dl) return; + devlink_params_unregister(dl, rvu_af_dl_params, + ARRAY_SIZE(rvu_af_dl_params)); rvu_health_reporters_destroy(rvu); devlink_unregister(dl); devlink_free(dl); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h index 471e57dedb20..51efe88dce11 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h @@ -1,5 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Devlink +/* Marvell RVU Admin Function Devlink * * Copyright (C) 2020 Marvell. * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index 4bfbbdf38770..9ef4e942e31e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -25,7 +22,7 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, int type, bool add); static int nix_setup_ipolicers(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr); -static void nix_ipolicer_freemem(struct nix_hw *nix_hw); +static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw); static int nix_verify_bandprof(struct nix_cn10k_aq_enq_req *req, struct nix_hw *nix_hw, u16 pcifunc); static int nix_free_all_bandprof(struct rvu *rvu, u16 pcifunc); @@ -192,6 +189,47 @@ struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) return NULL; } +u32 convert_dwrr_mtu_to_bytes(u8 dwrr_mtu) +{ + dwrr_mtu &= 0x1FULL; + + /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. + * Value of 4 is reserved for MTU value of 9728 bytes. + * Value of 5 is reserved for MTU value of 10240 bytes. + */ + switch (dwrr_mtu) { + case 4: + return 9728; + case 5: + return 10240; + default: + return BIT_ULL(dwrr_mtu); + } + + return 0; +} + +u32 convert_bytes_to_dwrr_mtu(u32 bytes) +{ + /* MTU used for DWRR calculation is in power of 2 up until 64K bytes. + * Value of 4 is reserved for MTU value of 9728 bytes. + * Value of 5 is reserved for MTU value of 10240 bytes. 
+ */ + if (bytes > BIT_ULL(16)) + return 0; + + switch (bytes) { + case 9728: + return 4; + case 10240: + return 5; + default: + return ilog2(bytes); + } + + return 0; +} + static void nix_rx_sync(struct rvu *rvu, int blkaddr) { int err; @@ -249,16 +287,22 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, return true; } -static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) +static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf, + struct nix_lf_alloc_rsp *rsp, bool loop) { - struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_pfvf *parent_pf, *pfvf = rvu_get_pfvf(rvu, pcifunc); + u16 req_chan_base, req_chan_end, req_chan_cnt; + struct rvu_hwinfo *hw = rvu->hw; + struct sdp_node_info *sdp_info; + int pkind, pf, vf, lbkid, vfid; struct mac_ops *mac_ops; - int pkind, pf, vf, lbkid; u8 cgx_id, lmac_id; + bool from_vf; int err; pf = rvu_get_pf(pcifunc); - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; switch (type) { @@ -276,10 +320,13 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) pfvf->tx_chan_base = pfvf->rx_chan_base; pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = cgx_id * hw->lmac_per_cgx + lmac_id; + cgx_set_pkind(rvu_cgx_pdata(cgx_id, rvu), lmac_id, pkind); rvu_npc_set_pkind(rvu, pkind, pfvf); mac_ops = get_mac_ops(rvu_cgx_pdata(cgx_id, rvu)); + /* By default we enable pause frames */ if ((pcifunc & RVU_PFVF_FUNC_MASK) == 0) mac_ops->mac_enadis_pause_frm(rvu_cgx_pdata(cgx_id, @@ -299,6 +346,25 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) if (rvu->hw->lbk_links > 1) lbkid = vf & 0x1 ? 0 : 1; + /* By default NIX0 is configured to send packet on lbk link 1 + * (which corresponds to LBK1), same packet will receive on + * NIX1 over lbk link 0. If NIX1 sends packet on lbk link 0 + * (which corresponds to LBK2) packet will receive on NIX0 lbk + * link 1. + * But if lbk links for NIX0 and NIX1 are negated, i.e. NIX0 + * transmits and receives on lbk link 0, which corresponds + * to LBK1 block, back to back connectivity between NIX and + * LBK can be achieved (which is similar to 96xx) + * + * RX TX + * NIX0 lbk link 1 (LBK2) 1 (LBK1) + * NIX0 lbk link 0 (LBK0) 0 (LBK0) + * NIX1 lbk link 0 (LBK1) 0 (LBK2) + * NIX1 lbk link 1 (LBK3) 1 (LBK3) + */ + if (loop) + lbkid = !lbkid; + /* Note that AF's VFs work in pairs and talk over consecutive * loopback channels. Therefore if odd number of AF VFs are * enabled then the last VF remains with no pair.
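The convert_dwrr_mtu_to_bytes()/convert_bytes_to_dwrr_mtu() helpers added above encode the DWRR MTU in a 5-bit register field: power-of-two sizes up to 64K are stored as log2(bytes), while the two non-power-of-two link MTUs, 9728 and 10240, use the reserved codes 4 and 5. The standalone userspace mirror below is purely illustrative (not part of the patch) and only demonstrates the round-trip of that mapping.

#include <stdio.h>

/* Mirror of the register encoding described above. */
static unsigned int dwrr_mtu_to_bytes(unsigned int val)
{
        val &= 0x1f;                            /* field is 5 bits wide */
        if (val == 4)
                return 9728;
        if (val == 5)
                return 10240;
        return 1u << val;                       /* power-of-two MTUs */
}

static unsigned int bytes_to_dwrr_mtu(unsigned int bytes)
{
        if (bytes == 9728)
                return 4;
        if (bytes == 10240)
                return 5;
        return 31 - __builtin_clz(bytes);       /* log2 for power-of-two sizes */
}

int main(void)
{
        unsigned int sizes[] = { 256, 9728, 10240, 65536 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
                printf("%u bytes -> code %u -> %u bytes\n", sizes[i],
                       bytes_to_dwrr_mtu(sizes[i]),
                       dwrr_mtu_to_bytes(bytes_to_dwrr_mtu(sizes[i])));
        return 0;
}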
@@ -309,10 +375,51 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_nix_chan_lbk(rvu, lbkid, vf + 1); pfvf->rx_chan_cnt = 1; pfvf->tx_chan_cnt = 1; + rsp->tx_link = hw->cgx_links + lbkid; + pfvf->lbkid = lbkid; rvu_npc_set_pkind(rvu, NPC_RX_LBK_PKIND, pfvf); rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, pfvf->rx_chan_cnt); + + break; + case NIX_INTF_TYPE_SDP: + from_vf = !!(pcifunc & RVU_PFVF_FUNC_MASK); + parent_pf = &rvu->pf[rvu_get_pf(pcifunc)]; + sdp_info = parent_pf->sdp_info; + if (!sdp_info) { + dev_err(rvu->dev, "Invalid sdp_info pointer\n"); + return -EINVAL; + } + if (from_vf) { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn + + sdp_info->num_pf_rings; + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + for (vfid = 0; vfid < vf; vfid++) + req_chan_base += sdp_info->vf_rings[vfid]; + req_chan_cnt = sdp_info->vf_rings[vf]; + req_chan_end = req_chan_base + req_chan_cnt - 1; + if (req_chan_base < rvu_nix_chan_sdp(rvu, 0) || + req_chan_end > rvu_nix_chan_sdp(rvu, 255)) { + dev_err(rvu->dev, + "PF_Func 0x%x: Invalid channel base and count\n", + pcifunc); + return -EINVAL; + } + } else { + req_chan_base = rvu_nix_chan_sdp(rvu, 0) + sdp_info->pf_srn; + req_chan_cnt = sdp_info->num_pf_rings; + } + + pfvf->rx_chan_base = req_chan_base; + pfvf->rx_chan_cnt = req_chan_cnt; + pfvf->tx_chan_base = pfvf->rx_chan_base; + pfvf->tx_chan_cnt = pfvf->rx_chan_cnt; + + rsp->tx_link = hw->cgx_links + hw->lbk_links; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, + pfvf->rx_chan_cnt); break; } @@ -393,9 +500,9 @@ int rvu_mbox_handler_nix_bp_disable(struct rvu *rvu, static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, int type, int chan_id) { - int bpid, blkaddr, lmac_chan_cnt; + int bpid, blkaddr, lmac_chan_cnt, sdp_chan_cnt; + u16 cgx_bpid_cnt, lbk_bpid_cnt, sdp_bpid_cnt; struct rvu_hwinfo *hw = rvu->hw; - u16 cgx_bpid_cnt, lbk_bpid_cnt; struct rvu_pfvf *pfvf; u8 cgx_id, lmac_id; u64 cfg; @@ -404,8 +511,12 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST); lmac_chan_cnt = cfg & 0xFF; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + sdp_chan_cnt = cfg & 0xFFF; + cgx_bpid_cnt = hw->cgx_links * lmac_chan_cnt; lbk_bpid_cnt = hw->lbk_links * ((cfg >> 16) & 0xFF); + sdp_bpid_cnt = hw->sdp_links * sdp_chan_cnt; pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); @@ -443,6 +554,17 @@ static int rvu_nix_get_bpid(struct rvu *rvu, struct nix_bp_cfg_req *req, if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt)) return -EINVAL; break; + case NIX_INTF_TYPE_SDP: + if ((req->chan_base + req->chan_cnt) > 255) + return -EINVAL; + + bpid = sdp_bpid_cnt + req->chan_base; + if (req->bpid_per_chan) + bpid += chan_id; + + if (bpid > (cgx_bpid_cnt + lbk_bpid_cnt + sdp_bpid_cnt)) + return -EINVAL; + break; default: return -EINVAL; } @@ -462,9 +584,12 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, pf = rvu_get_pf(pcifunc); type = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + if (is_sdp_pfvf(pcifunc)) + type = NIX_INTF_TYPE_SDP; - /* Enable backpressure only for CGX mapped PFs and LBK interface */ - if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK) + /* Enable backpressure only for CGX mapped PFs and LBK/SDP interface */ + if (!is_pf_cgxmapped(rvu, pf) && type != NIX_INTF_TYPE_LBK && + type != NIX_INTF_TYPE_SDP) return 0; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -481,8 +606,9 @@ int rvu_mbox_handler_nix_bp_enable(struct rvu *rvu, } cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan)); + cfg &= ~GENMASK_ULL(8, 0); rvu_write64(rvu, blkaddr, NIX_AF_RX_CHANX_CFG(chan), - cfg | (bpid & 0xFF) | BIT_ULL(16)); + cfg | (bpid & GENMASK_ULL(8, 0)) | BIT_ULL(16)); chan_id++; bpid = rvu_nix_get_bpid(rvu, req, type, chan_id); } @@ -630,9 +756,10 @@ static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, struct rvu_pfvf *pfvf, int nixlf, int rss_sz, int rss_grps, int hwctx_size, - u64 way_mask) + u64 way_mask, bool tag_lsb_as_adder) { int err, grp, num_indices; + u64 val; /* RSS is not requested for this NIXLF */ if (!rss_sz) @@ -648,10 +775,13 @@ static int nixlf_rss_ctx_init(struct rvu *rvu, int blkaddr, (u64)pfvf->rss_ctx->iova); /* Config full RSS table size, enable RSS and caching */ - rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), - BIT_ULL(36) | BIT_ULL(4) | - ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE) | - way_mask << 20); + val = BIT_ULL(36) | BIT_ULL(4) | way_mask << 20 | + ilog2(num_indices / MAX_RSS_INDIR_TBL_SIZE); + + if (tag_lsb_as_adder) + val |= BIT_ULL(5); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_CFG(nixlf), val); /* Config RSS group offset and sizes */ for (grp = 0; grp < rss_grps; grp++) rvu_write64(rvu, blkaddr, NIX_AF_LFX_RSS_GRPX(nixlf, grp), @@ -943,7 +1073,7 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; return rvu_nix_blk_aq_enq_inst(rvu, nix_hw, req, rsp); } @@ -1200,7 +1330,8 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, /* Initialize receive side scaling (RSS) */ hwctx_size = 1UL << ((ctx_cfg >> 12) & 0xF); err = nixlf_rss_ctx_init(rvu, blkaddr, pfvf, nixlf, req->rss_sz, - req->rss_grps, hwctx_size, req->way_mask); + req->rss_grps, hwctx_size, req->way_mask, + !!(req->flags & NIX_LF_RSS_TAG_LSB_AS_ADDER)); if (err) goto free_mem; @@ -1258,7 +1389,11 @@ int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_PARSE_CFG(nixlf), cfg); intf = is_afvf(pcifunc) ? 
NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; - err = nix_interface_init(rvu, pcifunc, intf, nixlf); + if (is_sdp_pfvf(pcifunc)) + intf = NIX_INTF_TYPE_SDP; + + err = nix_interface_init(rvu, pcifunc, intf, nixlf, rsp, + !!(req->flags & NIX_LF_LBK_BLK_SEL)); if (err) goto free_mem; @@ -1364,7 +1499,7 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; cfg = (((u32)req->offset & 0x7) << 16) | (((u32)req->y_mask & 0xF) << 12) | @@ -1382,12 +1517,104 @@ int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, return 0; } +/* Handle shaper update specially for few revisions */ +static bool +handle_txschq_shaper_update(struct rvu *rvu, int blkaddr, int nixlf, + int lvl, u64 reg, u64 regval) +{ + u64 regbase, oldval, sw_xoff = 0; + u64 dbgval, md_debug0 = 0; + unsigned long poll_tmo; + bool rate_reg = 0; + u32 schq; + + regbase = reg & 0xFFFF; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + + /* Check for rate register */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + md_debug0 = NIX_AF_TL1X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL1X_SW_XOFF(schq); + + rate_reg = !!(regbase == NIX_AF_TL1X_CIR(0)); + break; + case NIX_TXSCH_LVL_TL2: + md_debug0 = NIX_AF_TL2X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL2X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL2X_CIR(0) || + regbase == NIX_AF_TL2X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL3: + md_debug0 = NIX_AF_TL3X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL3X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL3X_CIR(0) || + regbase == NIX_AF_TL3X_PIR(0)); + break; + case NIX_TXSCH_LVL_TL4: + md_debug0 = NIX_AF_TL4X_MD_DEBUG0(schq); + sw_xoff = NIX_AF_TL4X_SW_XOFF(schq); + + rate_reg = (regbase == NIX_AF_TL4X_CIR(0) || + regbase == NIX_AF_TL4X_PIR(0)); + break; + case NIX_TXSCH_LVL_MDQ: + sw_xoff = NIX_AF_MDQX_SW_XOFF(schq); + rate_reg = (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)); + break; + } + + if (!rate_reg) + return false; + + /* Nothing special to do when state is not toggled */ + oldval = rvu_read64(rvu, blkaddr, reg); + if ((oldval & 0x1) == (regval & 0x1)) { + rvu_write64(rvu, blkaddr, reg, regval); + return true; + } + + /* PIR/CIR disable */ + if (!(regval & 0x1)) { + rvu_write64(rvu, blkaddr, sw_xoff, 1); + rvu_write64(rvu, blkaddr, reg, 0); + udelay(4); + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; + } + + /* PIR/CIR enable */ + rvu_write64(rvu, blkaddr, sw_xoff, 1); + if (md_debug0) { + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait until VLD(bit32) == 1 or C_CON(bit48) == 0 */ + do { + if (time_after(jiffies, poll_tmo)) { + dev_err(rvu->dev, + "NIXLF%d: TLX%u(lvl %u) CIR/PIR enable failed\n", + nixlf, schq, lvl); + goto exit; + } + usleep_range(1, 5); + dbgval = rvu_read64(rvu, blkaddr, md_debug0); + } while (!(dbgval & BIT_ULL(32)) && (dbgval & BIT_ULL(48))); + } + rvu_write64(rvu, blkaddr, reg, regval); +exit: + rvu_write64(rvu, blkaddr, sw_xoff, 0); + return true; +} + /* Disable shaping of pkts by a scheduler queue * at a given scheduler level. 
*/ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, - int lvl, int schq) + int nixlf, int lvl, int schq) { + struct rvu_hwinfo *hw = rvu->hw; u64 cir_reg = 0, pir_reg = 0; u64 cfg; @@ -1408,6 +1635,21 @@ static void nix_reset_tx_shaping(struct rvu *rvu, int blkaddr, cir_reg = NIX_AF_TL4X_CIR(schq); pir_reg = NIX_AF_TL4X_PIR(schq); break; + case NIX_TXSCH_LVL_MDQ: + cir_reg = NIX_AF_MDQX_CIR(schq); + pir_reg = NIX_AF_MDQX_PIR(schq); + break; + } + + /* Shaper state toggle needs wait/poll */ + if (hw->cap.nix_shaper_toggle_wait) { + if (cir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, cir_reg, 0); + if (pir_reg) + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + lvl, pir_reg, 0); + return; } if (!cir_reg) @@ -1425,6 +1667,7 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, int lvl, int schq) { struct rvu_hwinfo *hw = rvu->hw; + int link_level; int link; if (lvl >= hw->cap.nix_tx_aggr_lvl) @@ -1434,7 +1677,9 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, if (lvl == NIX_TXSCH_LVL_TL4) rvu_write64(rvu, blkaddr, NIX_AF_TL4X_SDP_LINK_CFG(schq), 0x00); - if (lvl != NIX_TXSCH_LVL_TL2) + link_level = rvu_read64(rvu, blkaddr, NIX_AF_PSE_CHANNEL_LEVEL) & 0x01 ? + NIX_TXSCH_LVL_TL3 : NIX_TXSCH_LVL_TL2; + if (lvl != link_level) return; /* Reset TL2's CGX or LBK link config */ @@ -1443,6 +1688,40 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } +static void nix_clear_tx_xoff(struct rvu *rvu, int blkaddr, + int lvl, int schq) +{ + struct rvu_hwinfo *hw = rvu->hw; + u64 reg; + + /* Skip this if shaping is not supported */ + if (!hw->cap.nix_shaping) + return; + + /* Clear level specific SW_XOFF */ + switch (lvl) { + case NIX_TXSCH_LVL_TL1: + reg = NIX_AF_TL1X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL2: + reg = NIX_AF_TL2X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL3: + reg = NIX_AF_TL3X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_TL4: + reg = NIX_AF_TL4X_SW_XOFF(schq); + break; + case NIX_TXSCH_LVL_MDQ: + reg = NIX_AF_MDQX_SW_XOFF(schq); + break; + default: + return; + } + + rvu_write64(rvu, blkaddr, reg, 0x0); +} + static int nix_get_tx_link(struct rvu *rvu, u16 pcifunc) { struct rvu_hwinfo *hw = rvu->hw; @@ -1620,19 +1899,18 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, int link, blkaddr, rc = 0; int lvl, idx, start, end; struct nix_txsch *txsch; - struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; u32 *pfvf_map; + int nixlf; u16 schq; - pfvf = rvu_get_pfvf(rvu, pcifunc); - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; + rc = nix_get_nixlf(rvu, pcifunc, &nixlf, &blkaddr); + if (rc) + return rc; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; mutex_lock(&rvu->rsrc_lock); @@ -1677,7 +1955,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } for (idx = 0; idx < req->schq[lvl]; idx++) { @@ -1686,7 +1964,7 @@ int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, NIX_TXSCHQ_CFG_DONE)) pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); - nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); + nix_reset_tx_shaping(rvu, blkaddr, nixlf, lvl, schq); } } @@ -1703,8 +1981,8 @@ exit: return 
rc; } -static void nix_smq_flush(struct rvu *rvu, int blkaddr, - int smq, u16 pcifunc, int nixlf) +static int nix_smq_flush(struct rvu *rvu, int blkaddr, + int smq, u16 pcifunc, int nixlf) { int pf = rvu_get_pf(pcifunc); u8 cgx_id = 0, lmac_id = 0; @@ -1739,6 +2017,7 @@ static void nix_smq_flush(struct rvu *rvu, int blkaddr, /* restore cgx tx state */ if (restore_tx_en) cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + return err; } static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) @@ -1747,6 +2026,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) struct rvu_hwinfo *hw = rvu->hw; struct nix_txsch *txsch; struct nix_hw *nix_hw; + u16 map_func; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1754,25 +2034,42 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; - /* Disable TL2/3 queue links before SMQ flush*/ + /* Disable TL2/3 queue links and all XOFF's before SMQ flush*/ mutex_lock(&rvu->rsrc_lock); - for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { - if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) + for (lvl = NIX_TXSCH_LVL_MDQ; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + txsch = &nix_hw->txsch[lvl]; + + if (lvl >= hw->cap.nix_tx_aggr_lvl) continue; - txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); } } + nix_clear_tx_xoff(rvu, blkaddr, NIX_TXSCH_LVL_TL1, + nix_get_tx_link(rvu, pcifunc)); + + /* On PF cleanup, clear cfg done flag as + * PF would have changed default config. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; + schq = nix_get_tx_link(rvu, pcifunc); + /* Do not clear pcifunc in txsch->pfvf_map[schq] because + * VF might be using this TL1 queue + */ + map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); + txsch->pfvf_map[schq] = TXSCH_SET_FLAG(map_func, 0x0); + } /* Flush SMQs */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; @@ -1818,6 +2115,7 @@ static int nix_txschq_free_one(struct rvu *rvu, struct nix_txsch *txsch; struct nix_hw *nix_hw; u32 *pfvf_map; + int rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) @@ -1825,7 +2123,7 @@ static int nix_txschq_free_one(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); if (nixlf < 0) @@ -1842,15 +2140,24 @@ static int nix_txschq_free_one(struct rvu *rvu, mutex_lock(&rvu->rsrc_lock); if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { - mutex_unlock(&rvu->rsrc_lock); + rc = NIX_AF_ERR_TLX_INVALID; goto err; } + /* Clear SW_XOFF of this resource only. + * For SMQ level, all path XOFF's + * need to be made clear by user + */ + nix_clear_tx_xoff(rvu, blkaddr, lvl, schq); + /* Flush if it is a SMQ. 
Onus of disabling * TL2/3 queue links before SMQ flush is on user */ - if (lvl == NIX_TXSCH_LVL_SMQ) - nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf); + if (lvl == NIX_TXSCH_LVL_SMQ && + nix_smq_flush(rvu, blkaddr, schq, pcifunc, nixlf)) { + rc = NIX_AF_SMQ_FLUSH_FAILED; + goto err; + } /* Free the resource */ rvu_free_rsrc(&txsch->schq, schq); @@ -1858,7 +2165,8 @@ static int nix_txschq_free_one(struct rvu *rvu, mutex_unlock(&rvu->rsrc_lock); return 0; err: - return NIX_AF_ERR_TLX_INVALID; + mutex_unlock(&rvu->rsrc_lock); + return rc; } int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, @@ -1941,6 +2249,11 @@ static bool is_txschq_shaping_valid(struct rvu_hwinfo *hw, int lvl, u64 reg) regbase == NIX_AF_TL4X_PIR(0)) return false; break; + case NIX_TXSCH_LVL_MDQ: + if (regbase == NIX_AF_MDQX_CIR(0) || + regbase == NIX_AF_MDQX_PIR(0)) + return false; + break; } return true; } @@ -1958,12 +2271,48 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, return; rvu_write64(rvu, blkaddr, NIX_AF_TL1X_TOPOLOGY(schq), (TXSCH_TL1_DFLT_RR_PRIO << 1)); - rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), - TXSCH_TL1_DFLT_RR_QTM); + + /* On OcteonTx2 the config was in bytes and newer silcons + * it's changed to weight. + */ + if (!rvu->hw->cap.nix_common_dwrr_mtu) + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + TXSCH_TL1_DFLT_RR_QTM); + else + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SCHEDULE(schq), + CN10K_MAX_DWRR_WEIGHT); + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_CIR(schq), 0x00); pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +/* Register offset - [15:0] + * Scheduler Queue number - [25:16] + */ +#define NIX_TX_SCHQ_MASK GENMASK_ULL(25, 0) + +static int nix_txschq_cfg_read(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, struct nix_txschq_config *req, + struct nix_txschq_config *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int idx, schq; + u64 reg; + + for (idx = 0; idx < req->num_regs; idx++) { + reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + if (!rvu_check_valid_reg(TXSCHQ_HWREGMAP, req->lvl, reg) || + !is_valid_txschq(rvu, blkaddr, req->lvl, pcifunc, schq)) + return NIX_AF_INVAL_TXSCHQ_CFG; + rsp->regval[idx] = rvu_read64(rvu, blkaddr, reg); + } + rsp->lvl = req->lvl; + rsp->num_regs = req->num_regs; + return 0; +} + static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, u16 pcifunc, struct nix_txsch *txsch) { @@ -1995,11 +2344,11 @@ static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, - struct msg_rsp *rsp) + struct nix_txschq_config *rsp) { + u64 reg, val, regval, schq_regbase, val_mask; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; - u64 reg, regval, schq_regbase; struct nix_txsch *txsch; struct nix_hw *nix_hw; int blkaddr, idx, err; @@ -2016,7 +2365,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; + + if (req->read) + return nix_txschq_cfg_read(rvu, nix_hw, blkaddr, req, rsp); txsch = &nix_hw->txsch[req->lvl]; pfvf_map = txsch->pfvf_map; @@ -2032,8 +2384,10 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; + reg &= NIX_TX_SCHQ_MASK; regval = req->regval[idx]; schq_regbase = reg & 0xFFFF; + val_mask = req->regval_mask[idx]; if (!is_txschq_hierarchy_valid(rvu, pcifunc, blkaddr, 
txsch->lvl, reg, regval)) @@ -2043,6 +2397,15 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, if (!is_txschq_shaping_valid(hw, req->lvl, reg)) continue; + val = rvu_read64(rvu, blkaddr, reg); + regval = (val & val_mask) | (regval & ~val_mask); + + /* Handle shaping state toggle specially */ + if (hw->cap.nix_shaper_toggle_wait && + handle_txschq_shaper_update(rvu, blkaddr, nixlf, + req->lvl, reg, regval)) + continue; + /* Replace PF/VF visible NIXLF slot with HW NIXLF id */ if (schq_regbase == NIX_AF_SMQX_CFG(0)) { nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], @@ -2083,7 +2446,6 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); - return 0; } @@ -2114,8 +2476,12 @@ static int nix_tx_vtag_free(struct rvu *rvu, int blkaddr, u16 pcifunc, int index) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; + struct nix_txvlan *vlan; + + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + vlan = &nix_hw->txvlan; if (vlan->entry2pfvf_map[index] != pcifunc) return NIX_AF_ERR_PARAM; @@ -2156,10 +2522,15 @@ static int nix_tx_vtag_alloc(struct rvu *rvu, int blkaddr, u64 vtag, u8 size) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; + struct nix_txvlan *vlan; u64 regval; int index; + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; + mutex_lock(&vlan->rsrc_lock); index = rvu_alloc_rsrc(&vlan->rsrc); @@ -2184,12 +2555,16 @@ static int nix_tx_vtag_decfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config *req) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; u16 pcifunc = req->hdr.pcifunc; int idx0 = req->tx.vtag0_idx; int idx1 = req->tx.vtag1_idx; + struct nix_txvlan *vlan; int err = 0; + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; if (req->tx.free_vtag0 && req->tx.free_vtag1) if (vlan->entry2pfvf_map[idx0] != pcifunc || vlan->entry2pfvf_map[idx1] != pcifunc) @@ -2216,9 +2591,13 @@ static int nix_tx_vtag_cfg(struct rvu *rvu, int blkaddr, struct nix_vtag_config_rsp *rsp) { struct nix_hw *nix_hw = get_nix_hw(rvu->hw, blkaddr); - struct nix_txvlan *vlan = &nix_hw->txvlan; + struct nix_txvlan *vlan; u16 pcifunc = req->hdr.pcifunc; + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + vlan = &nix_hw->txvlan; if (req->tx.cfg_vtag0) { rsp->vtag0_idx = nix_tx_vtag_alloc(rvu, blkaddr, @@ -2456,14 +2835,19 @@ static int nix_update_mce_rule(struct rvu *rvu, u16 pcifunc, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_hwinfo *hw = rvu->hw; struct nix_mce_list *mce_list; + int pf; - /* skip multicast pkt replication for AF's VFs */ - if (is_afvf(pcifunc)) + /* skip multicast pkt replication for AF's VFs & SDP links */ + if (is_afvf(pcifunc) || is_sdp_pfvf(pcifunc)) return 0; if (!hw->cap.nix_rx_multicast) return 0; + pf = rvu_get_pf(pcifunc); + if (!is_pf_cgxmapped(rvu, pf)) + return 0; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return -EINVAL; @@ -2667,6 +3051,15 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) for (schq = 0; schq < txsch->schq.max; schq++) txsch->pfvf_map[schq] = TXSCH_MAP(0, NIX_TXSCHQ_FREE); } + + /* Setup a default value of 8192 as DWRR MTU */ + if (rvu->hw->cap.nix_common_dwrr_mtu) { + rvu_write64(rvu, blkaddr, NIX_AF_DWRR_RPM_MTU, + convert_bytes_to_dwrr_mtu(8192)); + rvu_write64(rvu, blkaddr, NIX_AF_DWRR_SDP_MTU, + 
convert_bytes_to_dwrr_mtu(8192)); + } + return 0; } @@ -2743,6 +3136,7 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, struct nix_hw_info *rsp) { u16 pcifunc = req->hdr.pcifunc; + u64 dwrr_mtu; int blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); @@ -2755,6 +3149,20 @@ int rvu_mbox_handler_nix_get_hw_info(struct rvu *rvu, struct msg_req *req, rvu_get_lmac_link_max_frs(rvu, &rsp->max_mtu); rsp->min_mtu = NIC_HW_MIN_FRS; + + if (!rvu->hw->cap.nix_common_dwrr_mtu) { + /* Return '1' on OTx2 */ + rsp->rpm_dwrr_mtu = 1; + rsp->sdp_dwrr_mtu = 1; + return 0; + } + + dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_RPM_MTU); + rsp->rpm_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + + dwrr_mtu = rvu_read64(rvu, BLKADDR_NIX0, NIX_AF_DWRR_SDP_MTU); + rsp->sdp_dwrr_mtu = convert_dwrr_mtu_to_bytes(dwrr_mtu); + return 0; } @@ -3068,7 +3476,7 @@ static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) hw = get_nix_hw(rvu->hw, blkaddr); if (!hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; /* No room to add new flow hash algoritham */ if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) @@ -3108,7 +3516,7 @@ int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); /* Failed to get algo index from the exiting list, reserve new */ @@ -3366,6 +3774,77 @@ static void nix_find_link_frs(struct rvu *rvu, req->minlen = minlen; } +static int +nix_config_link_credits(struct rvu *rvu, int blkaddr, int link, + u16 pcifunc, u64 tx_credits) +{ + struct rvu_hwinfo *hw = rvu->hw; + int pf = rvu_get_pf(pcifunc); + u8 cgx_id = 0, lmac_id = 0; + unsigned long poll_tmo; + bool restore_tx_en = 0; + struct nix_hw *nix_hw; + u64 cfg, sw_xoff = 0; + u32 schq = 0; + u32 credits; + int rc; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return NIX_AF_ERR_INVALID_NIXBLK; + + if (tx_credits == nix_hw->tx_credits[link]) + return 0; + + /* Enable cgx tx if disabled for credits to be back */ + if (is_pf_cgxmapped(rvu, pf)) { + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); + restore_tx_en = !cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), + lmac_id, true); + } + + mutex_lock(&rvu->rsrc_lock); + /* Disable new traffic to link */ + if (hw->cap.nix_shaping) { + schq = nix_get_tx_link(rvu, pcifunc); + sw_xoff = rvu_read64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq)); + rvu_write64(rvu, blkaddr, + NIX_AF_TL1X_SW_XOFF(schq), BIT_ULL(0)); + } + + rc = -EBUSY; + poll_tmo = jiffies + usecs_to_jiffies(10000); + /* Wait for credits to return */ + do { + if (time_after(jiffies, poll_tmo)) + goto exit; + usleep_range(100, 200); + + cfg = rvu_read64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link)); + credits = (cfg >> 12) & 0xFFFFFULL; + } while (credits != nix_hw->tx_credits[link]); + + cfg &= ~(0xFFFFFULL << 12); + cfg |= (tx_credits << 12); + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); + rc = 0; + + nix_hw->tx_credits[link] = tx_credits; + +exit: + /* Enable traffic back */ + if (hw->cap.nix_shaping && !sw_xoff) + rvu_write64(rvu, blkaddr, NIX_AF_TL1X_SW_XOFF(schq), 0); + + /* Restore state of cgx tx */ + if (restore_tx_en) + cgx_lmac_tx_enable(rvu_cgx_pdata(cgx_id, rvu), lmac_id, false); + + mutex_unlock(&rvu->rsrc_lock); + return rc; +} + int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct msg_rsp *rsp) { @@ -3376,6 +3855,7 @@ 
int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, struct nix_txsch *txsch; u64 cfg, lmac_fifo_len; struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; u8 cgx = 0, lmac = 0; u16 max_mtu; @@ -3385,7 +3865,7 @@ int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; if (is_afvf(pcifunc)) rvu_get_lbk_link_max_frs(rvu, &max_mtu); @@ -3432,7 +3912,8 @@ rx_frscfg: link = (cgx * hw->lmac_per_cgx) + lmac; } else if (pf == 0) { /* For VFs of PF0 ingress is LBK port, so config LBK link */ - link = hw->cgx_links; + pfvf = rvu_get_pfvf(rvu, pcifunc); + link = hw->cgx_links + pfvf->lbkid; } if (link < 0) @@ -3454,11 +3935,8 @@ linkcfg: lmac_fifo_len = rvu_cgx_get_fifolen(rvu) / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); - cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); - cfg &= ~(0xFFFFFULL << 12); - cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; - rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); - return 0; + return nix_config_link_credits(rvu, blkaddr, link, pcifunc, + (lmac_fifo_len - req->maxlen) / 16); } int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, @@ -3502,12 +3980,13 @@ static u64 rvu_get_lbk_link_credits(struct rvu *rvu, u16 lbk_max_frs) return 1600; /* 16 * max LBK datarate = 16 * 100Gbps */ } -static void nix_link_config(struct rvu *rvu, int blkaddr) +static void nix_link_config(struct rvu *rvu, int blkaddr, + struct nix_hw *nix_hw) { struct rvu_hwinfo *hw = rvu->hw; int cgx, lmac_cnt, slink, link; u16 lbk_max_frs, lmac_max_frs; - u64 tx_credits; + u64 tx_credits, cfg; rvu_get_lbk_link_max_frs(rvu, &lbk_max_frs); rvu_get_lmac_link_max_frs(rvu, &lmac_max_frs); @@ -3538,15 +4017,18 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) */ for (cgx = 0; cgx < hw->cgx; cgx++) { lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + /* Skip when cgx is not available or lmac cnt is zero */ + if (lmac_cnt <= 0) + continue; tx_credits = ((rvu_cgx_get_fifolen(rvu) / lmac_cnt) - lmac_max_frs) / 16; /* Enable credits and set credit pkt count to max allowed */ - tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + cfg = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); slink = cgx * hw->lmac_per_cgx; for (link = slink; link < (slink + lmac_cnt); link++) { + nix_hw->tx_credits[link] = tx_credits; rvu_write64(rvu, blkaddr, - NIX_AF_TX_LINKX_NORM_CREDIT(link), - tx_credits); + NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); } } @@ -3554,6 +4036,7 @@ static void nix_link_config(struct rvu *rvu, int blkaddr) slink = hw->cgx_links; for (link = slink; link < (slink + hw->lbk_links); link++) { tx_credits = rvu_get_lbk_link_credits(rvu, lbk_max_frs); + nix_hw->tx_credits[link] = tx_credits; /* Enable credits and set credit pkt count to max allowed */ tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); rvu_write64(rvu, blkaddr, @@ -3647,6 +4130,28 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) return 0; } +static void rvu_nix_setup_capabilities(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + u64 hw_const; + + hw_const = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + + /* On OcteonTx2 DWRR quantum is directly configured into each of + * the transmit scheduler queues. And PF/VF drivers were free to + * config any value upto 2^24. + * On CN10K, HW is modified, the quantum configuration at scheduler + * queues is in terms of weight. 
And SW needs to setup a base DWRR MTU + * at NIX_AF_DWRR_RPM_MTU / NIX_AF_DWRR_SDP_MTU. HW will do + * 'DWRR MTU * weight' to get the quantum. + * + * Check if HW uses a common MTU for all DWRR quantum configs. + * On OcteonTx2 this register field is '0'. + */ + if (((hw_const >> 56) & 0x10) == 0x10) + hw->cap.nix_common_dwrr_mtu = true; +} + static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) { const struct npc_lt_def_cfg *ltdefs; @@ -3684,6 +4189,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + /* Setup capabilities of the NIX block */ + rvu_nix_setup_capabilities(rvu, blkaddr); + /* Initialize admin queue */ err = nix_aq_init(rvu, block); if (err) @@ -3692,6 +4200,9 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) /* Restore CINT timer delay to HW reset values */ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); + /* For better performance use NDC TX instead of NDC RX for SQ's SQEs" */ + rvu_write64(rvu, blkaddr, NIX_AF_SEB_CFG, 0x1ULL); + if (is_block_implemented(hw, blkaddr)) { err = nix_setup_txschq(rvu, nix_hw, blkaddr); if (err) @@ -3792,8 +4303,13 @@ static int rvu_nix_block_init(struct rvu *rvu, struct nix_hw *nix_hw) if (err) return err; + nix_hw->tx_credits = kcalloc(hw->cgx_links + hw->lbk_links, + sizeof(u64), GFP_KERNEL); + if (!nix_hw->tx_credits) + return -ENOMEM; + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ - nix_link_config(rvu, blkaddr); + nix_link_config(rvu, blkaddr, nix_hw); /* Enable Channel backpressure */ rvu_write64(rvu, blkaddr, NIX_AF_RX_CFG, BIT_ULL(0)); @@ -3849,7 +4365,9 @@ static void rvu_nix_block_freemem(struct rvu *rvu, int blkaddr, kfree(txsch->schq.bmap); } - nix_ipolicer_freemem(nix_hw); + kfree(nix_hw->tx_credits); + + nix_ipolicer_freemem(rvu, nix_hw); vlan = &nix_hw->txvlan; kfree(vlan->rsrc.bmap); @@ -4027,7 +4545,7 @@ int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) - return -EINVAL; + return NIX_AF_ERR_INVALID_NIXBLK; /* Find existing matching LSO format, if any */ for (idx = 0; idx < nix_hw->lso.in_use; idx++) { @@ -4225,11 +4743,14 @@ static int nix_setup_ipolicers(struct rvu *rvu, return 0; } -static void nix_ipolicer_freemem(struct nix_hw *nix_hw) +static void nix_ipolicer_freemem(struct rvu *rvu, struct nix_hw *nix_hw) { struct nix_ipolicer *ipolicer; int layer; + if (!rvu->hw->cap.ipolicer) + return; + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { ipolicer = &nix_hw->ipolicer[layer]; @@ -4652,3 +5173,36 @@ static void nix_clear_ratelimit_aggr(struct rvu *rvu, struct nix_hw *nix_hw, rvu_free_rsrc(&ipolicer->band_prof, mid_prof); } } + +int rvu_mbox_handler_nix_bandprof_get_hwinfo(struct rvu *rvu, struct msg_req *req, + struct nix_bandprof_get_hwinfo_rsp *rsp) +{ + struct nix_ipolicer *ipolicer; + int blkaddr, layer, err; + struct nix_hw *nix_hw; + u64 tu; + + if (!rvu->hw->cap.ipolicer) + return NIX_AF_ERR_IPOLICER_NOTSUPP; + + err = nix_get_struct_ptrs(rvu, req->hdr.pcifunc, &nix_hw, &blkaddr); + if (err) + return err; + + /* Return number of bandwidth profiles free at each layer */ + mutex_lock(&rvu->rsrc_lock); + for (layer = 0; layer < BAND_PROF_NUM_LAYERS; layer++) { + if (layer == BAND_PROF_INVAL_LAYER) + continue; + + ipolicer = &nix_hw->ipolicer[layer]; + rsp->prof_count[layer] = rvu_rsrc_free_count(&ipolicer->band_prof); + } + mutex_unlock(&rvu->rsrc_lock); + + /* Set the policer timeunit in nanosec */ + tu = rvu_read64(rvu, blkaddr, NIX_AF_PL_TS) & 
GENMASK_ULL(9, 0); + rsp->policer_timeunit = (tu + 1) * 100; + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 24c2bfdfec4e..70bd036ed76e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/module.h> @@ -419,6 +416,10 @@ exit: rsp->stack_pg_ptrs = (cfg >> 8) & 0xFF; rsp->stack_pg_bytes = cfg & 0xFF; rsp->qints = (cfg >> 28) & 0xFFF; + if (!is_rvu_otx2(rvu)) { + cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); + rsp->cache_lines = (cfg >> 1) & 0x3F; + } return rc; } @@ -478,6 +479,13 @@ static int npa_aq_init(struct rvu *rvu, struct rvu_block *block) #endif rvu_write64(rvu, block->addr, NPA_AF_NDC_CFG, cfg); + /* For CN10K NPA BATCH DMA set 35 cache lines */ + if (!is_rvu_otx2(rvu)) { + cfg = rvu_read64(rvu, block->addr, NPA_AF_BATCH_CTL); + cfg &= ~0x7EULL; + cfg |= BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1); + rvu_write64(rvu, block->addr, NPA_AF_BATCH_CTL, cfg); + } /* Result structure can be followed by Aura/Pool context at * RES + 128bytes and a write mask at RES + 256 bytes, depending on * operation type. Alloc sufficient result memory for all operations. diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 52b255426c22..5efb4174e82d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/bitfield.h> @@ -23,7 +20,7 @@ #define RSVD_MCAM_ENTRIES_PER_NIXLF 1 /* Ucast for LFs */ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 -#define NPC_HW_TSTAMP_OFFSET 8 +#define NPC_HW_TSTAMP_OFFSET 8ULL #define NPC_KEX_CHAN_MASK 0xFFFULL #define NPC_KEX_PF_FUNC_MASK 0xFFFFULL @@ -85,36 +82,6 @@ static int npc_mcam_verify_pf_func(struct rvu *rvu, return 0; } -int npc_mcam_verify_channel(struct rvu *rvu, u16 pcifunc, u8 intf, u16 channel) -{ - int pf = rvu_get_pf(pcifunc); - u8 cgx_id, lmac_id; - int base = 0, end; - - if (is_npc_intf_tx(intf)) - return 0; - - /* return in case of AF installed rules */ - if (is_pffunc_af(pcifunc)) - return 0; - - if (is_afvf(pcifunc)) { - end = rvu_get_num_lbk_chans(); - if (end < 0) - return -EINVAL; - } else { - rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx_id, &lmac_id); - base = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0x0); - /* CGX mapped functions has maximum of 16 channels */ - end = rvu_nix_chan_cgx(rvu, cgx_id, lmac_id, 0xF); - } - - if (channel < base || channel > end) - return -EINVAL; - - return 0; -} - void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { int blkaddr; @@ -634,8 +601,8 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, struct nix_rx_action action; int blkaddr, index; - /* AF's VFs work in promiscuous mode */ - if (is_afvf(pcifunc)) + /* AF's and SDP VFs work in promiscuous mode */ + if (is_afvf(pcifunc) || is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -724,7 +691,17 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, action.index = pfvf->promisc_mce_idx; } - req.chan_mask = 0xFFFU; + /* For cn10k the upper two bits of the channel number are + * cpt channel number. with masking out these bits in the + * mcam entry, same entry used for NIX will allow packets + * received from cpt for parsing. 
+ */ + if (!is_rvu_otx2(rvu)) { + req.chan_mask = NIX_CHAN_CPT_X2P_MASK; + } else { + req.chan_mask = 0xFFFU; + } + if (chan_cnt > 1) { if (!is_power_of_2(chan_cnt)) { dev_err(rvu->dev, @@ -853,7 +830,7 @@ void rvu_npc_install_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u16 vf_func; /* Only CGX PF/VF can add allmulticast entry */ - if (is_afvf(pcifunc)) + if (is_afvf(pcifunc) && is_sdp_vf(pcifunc)) return; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); @@ -938,7 +915,7 @@ void rvu_npc_enable_allmulti_entry(struct rvu *rvu, u16 pcifunc, int nixlf, static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 pcifunc, u64 rx_action) { - int actindex, index, bank; + int actindex, index, bank, entry; bool enable; if (!(pcifunc & RVU_PFVF_FUNC_MASK)) @@ -949,7 +926,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, if (mcam->entry2target_pffunc[index] == pcifunc) { bank = npc_get_bank(mcam, index); actindex = index; - index &= (mcam->banksize - 1); + entry = index & (mcam->banksize - 1); /* read vf flow entry enable status */ enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, @@ -959,7 +936,7 @@ static void npc_update_vf_flow_entry(struct rvu *rvu, struct npc_mcam *mcam, false); /* update 'action' */ rvu_write64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank), + NPC_AF_MCAMEX_BANKX_ACTION(entry, bank), rx_action); if (enable) npc_enable_mcam_entry(rvu, mcam, blkaddr, @@ -1898,9 +1875,22 @@ static void rvu_npc_hw_init(struct rvu *rvu, int blkaddr) mcam->banks = (npc_const >> 44) & 0xFULL; mcam->banksize = (npc_const >> 28) & 0xFFFFULL; + hw->npc_stat_ena = BIT_ULL(9); /* Extended set */ if (npc_const2) { hw->npc_ext_set = true; + /* 96xx supports only match_stats and npc_counters + * reflected in NPC_AF_CONST reg. + * STAT_SEL and ENA are at [0:8] and 9 bit positions. + * 98xx has both match_stat and ext and npc_counter + * reflected in NPC_AF_CONST2 + * STAT_SEL_EXT added at [12:14] bit position. + * cn10k supports only ext and hence npc_counters in + * NPC_AF_CONST is 0 and npc_counters reflected in NPC_AF_CONST2. + * STAT_SEL bitpos incremented from [0:8] to [0:11] and ENA bit moved to 63 + */ + if (!hw->npc_counters) + hw->npc_stat_ena = BIT_ULL(63); hw->npc_counters = (npc_const2 >> 16) & 0xFFFFULL; mcam->banksize = npc_const2 & 0xFFFFULL; } @@ -1955,7 +1945,7 @@ static void rvu_npc_setup_interfaces(struct rvu *rvu, int blkaddr) rvu_write64(rvu, blkaddr, NPC_AF_INTFX_MISS_STAT_ACT(intf), ((mcam->rx_miss_act_cntr >> 9) << 12) | - BIT_ULL(9) | mcam->rx_miss_act_cntr); + hw->npc_stat_ena | mcam->rx_miss_act_cntr); } /* Configure TX interfaces */ @@ -2030,14 +2020,15 @@ int rvu_npc_init(struct rvu *rvu) /* Enable below for Rx pkts. * - Outer IPv4 header checksum validation. - * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M]. + * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2B]. + * - Detect outer L2 multicast address and set NPC_RESULT_S[L2M]. * - Inner IPv4 header checksum validation. 
* - Set non zero checksum error code value */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | - BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) | - BIT_ULL(2) | BIT_ULL(1)); + ((u64)NPC_EC_OIP4_CSUM << 32) | (NPC_EC_IIP4_CSUM << 24) | + BIT_ULL(7) | BIT_ULL(6) | BIT_ULL(2) | BIT_ULL(1)); rvu_npc_setup_interfaces(rvu, blkaddr); @@ -2147,18 +2138,16 @@ static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, u16 entry, u16 cntr) { u16 index = entry & (mcam->banksize - 1); - u16 bank = npc_get_bank(mcam, entry); + u32 bank = npc_get_bank(mcam, entry); + struct rvu_hwinfo *hw = rvu->hw; /* Set mapping and increment counter's refcnt */ mcam->entry2cntr_map[entry] = cntr; mcam->cntr_refcnt[cntr]++; - /* Enable stats - * NPC_AF_MCAMEX_BANKX_STAT_ACT[14:12] - counter[11:9] - * NPC_AF_MCAMEX_BANKX_STAT_ACT[8:0] - counter[8:0] - */ + /* Enable stats */ rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), - ((cntr >> 9) << 12) | BIT_ULL(9) | cntr); + ((cntr >> 9) << 12) | hw->npc_stat_ena | cntr); } static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, @@ -2166,7 +2155,7 @@ static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, int blkaddr, u16 entry, u16 cntr) { u16 index = entry & (mcam->banksize - 1); - u16 bank = npc_get_bank(mcam, entry); + u32 bank = npc_get_bank(mcam, entry); /* Remove mapping and reduce counter's refcnt */ mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; @@ -2414,6 +2403,17 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, goto alloc; } + /* For a VF base MCAM match rule is set by its PF. And all the + * further MCAM rules installed by VF on its own are + * concatenated with the base rule set by its PF. Hence PF entries + * should be at lower priority compared to VF entries. Otherwise + * base rule is hit always and rules installed by VF will be of + * no use. Hence if the request is from PF and NOT a priority + * allocation request then allocate low priority entries. + */ + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + goto lprio_alloc; + /* Find out the search range for non-priority allocation request * * Get MCAM free entry count in middle zone. @@ -2439,6 +2439,7 @@ static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, /* Not enough free entries, search all entries in reverse, * so that low priority ones will get used up. 
*/ +lprio_alloc: reverse = true; start = 0; end = mcam->bmap_entries; @@ -2673,7 +2674,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; - u16 channel, chan_mask; int blkaddr, rc; u8 nix_intf; @@ -2681,10 +2681,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, if (blkaddr < 0) return NPC_MCAM_INVALID_REQ; - chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; - channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; - channel &= chan_mask; - mutex_lock(&mcam->lock); rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); if (rc) @@ -2707,12 +2703,6 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, nix_intf = pfvf->nix_rx_intf; if (!is_pffunc_af(pcifunc) && - npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { - rc = NPC_MCAM_INVALID_REQ; - goto exit; - } - - if (!is_pffunc_af(pcifunc) && npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; @@ -2788,8 +2778,8 @@ int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, struct npc_mcam *mcam = &rvu->hw->mcam; u16 pcifunc = req->hdr.pcifunc; u16 old_entry, new_entry; + int blkaddr, rc = 0; u16 index, cntr; - int blkaddr, rc; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) @@ -2990,10 +2980,11 @@ int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); if (index >= mcam->bmap_entries) break; + entry = index + 1; + if (mcam->entry2cntr_map[index] != req->cntr) continue; - entry = index + 1; npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, index, req->cntr); } @@ -3058,7 +3049,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, struct npc_mcam *mcam = &rvu->hw->mcam; u16 entry = NPC_MCAM_ENTRY_INVALID; u16 cntr = NPC_MCAM_ENTRY_INVALID; - u16 channel, chan_mask; int blkaddr, rc; u8 nix_intf; @@ -3069,13 +3059,6 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (!is_npc_interface_valid(rvu, req->intf)) return NPC_MCAM_INVALID_REQ; - chan_mask = req->entry_data.kw_mask[0] & NPC_KEX_CHAN_MASK; - channel = req->entry_data.kw[0] & NPC_KEX_CHAN_MASK; - channel &= chan_mask; - - if (npc_mcam_verify_channel(rvu, req->hdr.pcifunc, req->intf, channel)) - return NPC_MCAM_INVALID_REQ; - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, req->hdr.pcifunc)) return NPC_MCAM_INVALID_REQ; @@ -3252,7 +3235,7 @@ int rvu_mbox_handler_npc_mcam_entry_stats(struct rvu *rvu, /* read MCAM entry STAT_ACT register */ regval = rvu_read64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank)); - if (!(regval & BIT_ULL(9))) { + if (!(regval & rvu->hw->npc_stat_ena)) { rsp->stat_ena = 0; mutex_unlock(&mcam->lock); return 0; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 5c01cf4a9c5b..51ddc7b81d0b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * * Copyright (C) 2020 Marvell. 
*/ @@ -20,6 +20,8 @@ static const char * const npc_flow_names[] = { [NPC_DMAC] = "dmac", [NPC_SMAC] = "smac", [NPC_ETYPE] = "ether type", + [NPC_VLAN_ETYPE_CTAG] = "vlan ether type ctag", + [NPC_VLAN_ETYPE_STAG] = "vlan ether type stag", [NPC_OUTER_VID] = "outer vlan id", [NPC_TOS] = "tos", [NPC_SIP_IPV4] = "ipv4 source ip", @@ -492,6 +494,11 @@ static void npc_set_features(struct rvu *rvu, int blkaddr, u8 intf) if (*features & BIT_ULL(NPC_OUTER_VID)) if (!npc_check_field(rvu, blkaddr, NPC_LB, intf)) *features &= ~BIT_ULL(NPC_OUTER_VID); + + /* for vlan ethertypes corresponding layer type should be in the key */ + if (npc_check_field(rvu, blkaddr, NPC_LB, intf)) + *features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG) | + BIT_ULL(NPC_VLAN_ETYPE_STAG); } /* Scan key extraction profile and record how fields of our interest @@ -600,7 +607,7 @@ static int npc_check_unsupported_flows(struct rvu *rvu, u64 features, u8 intf) dev_info(rvu->dev, "Unsupported flow(s):\n"); for_each_set_bit(bit, (unsigned long *)&unsupported, 64) dev_info(rvu->dev, "%s ", npc_get_field_name(bit)); - return NIX_AF_ERR_NPC_KEY_NOT_SUPP; + return -EOPNOTSUPP; } return 0; @@ -747,6 +754,28 @@ static void npc_update_ipv6_flow(struct rvu *rvu, struct mcam_entry *entry, } } +static void npc_update_vlan_features(struct rvu *rvu, struct mcam_entry *entry, + u64 features, u8 intf) +{ + bool ctag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_CTAG)); + bool stag = !!(features & BIT_ULL(NPC_VLAN_ETYPE_STAG)); + bool vid = !!(features & BIT_ULL(NPC_OUTER_VID)); + + /* If only VLAN id is given then always match outer VLAN id */ + if (vid && !ctag && !stag) { + npc_update_entry(rvu, NPC_LB, entry, + NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, + NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); + return; + } + if (ctag) + npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_CTAG, 0, + ~0ULL, 0, intf); + if (stag) + npc_update_entry(rvu, NPC_LB, entry, NPC_LT_LB_STAG_QINQ, 0, + ~0ULL, 0, intf); +} + static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, u64 features, struct flow_msg *pkt, struct flow_msg *mask, @@ -779,11 +808,6 @@ static void npc_update_flow(struct rvu *rvu, struct mcam_entry *entry, npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_ICMP6, 0, ~0ULL, 0, intf); - if (features & BIT_ULL(NPC_OUTER_VID)) - npc_update_entry(rvu, NPC_LB, entry, - NPC_LT_LB_STAG_QINQ | NPC_LT_LB_CTAG, 0, - NPC_LT_LB_STAG_QINQ & NPC_LT_LB_CTAG, 0, intf); - /* For AH, LTYPE should be present in entry */ if (features & BIT_ULL(NPC_IPPROTO_AH)) npc_update_entry(rvu, NPC_LD, entry, NPC_LT_LD_AH, @@ -829,6 +853,7 @@ do { \ ntohs(mask->vlan_tci), 0); npc_update_ipv6_flow(rvu, entry, features, pkt, mask, output, intf); + npc_update_vlan_features(rvu, entry, features, intf); } static struct rvu_npc_mcam_rule *rvu_mcam_find_rule(struct npc_mcam *mcam, @@ -995,13 +1020,11 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, struct npc_mcam *mcam = &rvu->hw->mcam; struct rvu_npc_mcam_rule dummy = { 0 }; struct rvu_npc_mcam_rule *rule; - bool new = false, msg_from_vf; u16 owner = req->hdr.pcifunc; struct msg_rsp write_rsp; struct mcam_entry *entry; int entry_index, err; - - msg_from_vf = !!(owner & RVU_PFVF_FUNC_MASK); + bool new = false; installed_features = req->features; features = req->features; @@ -1027,7 +1050,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, } /* update mcam entry with default unicast rule attributes */ - if (def_ucast_rule && (msg_from_vf || (req->default_rule && req->append))) { + if (def_ucast_rule && 
(req->default_rule && req->append)) { missing_features = (def_ucast_rule->features ^ features) & def_ucast_rule->features; if (missing_features) @@ -1130,6 +1153,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, struct npc_install_flow_rsp *rsp) { bool from_vf = !!(req->hdr.pcifunc & RVU_PFVF_FUNC_MASK); + struct rvu_switch *rswitch = &rvu->rswitch; int blkaddr, nixlf, err; struct rvu_pfvf *pfvf; bool pf_set_vfs_mac = false; @@ -1139,14 +1163,14 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { dev_err(rvu->dev, "%s: NPC block not implemented\n", __func__); - return -ENODEV; + return NPC_MCAM_INVALID_REQ; } if (!is_npc_interface_valid(rvu, req->intf)) - return -EINVAL; + return NPC_FLOW_INTF_INVALID; if (from_vf && req->default_rule) - return NPC_MCAM_PERM_DENIED; + return NPC_FLOW_VF_PERM_DENIED; /* Each PF/VF info is maintained in struct rvu_pfvf. * rvu_pfvf for the target PF/VF needs to be retrieved @@ -1172,12 +1196,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, err = npc_check_unsupported_flows(rvu, req->features, req->intf); if (err) - return err; - - /* Skip channel validation if AF is installing */ - if (!is_pffunc_af(req->hdr.pcifunc) && - npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) - return -EINVAL; + return NPC_FLOW_NOT_SUPPORTED; pfvf = rvu_get_pfvf(rvu, target); @@ -1195,7 +1214,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) - return -EINVAL; + return NPC_FLOW_NO_NIXLF; /* don't enable rule when nixlf not attached or initialized */ if (!(is_nixlf_attached(rvu, target) && @@ -1211,7 +1230,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, /* Do not allow requests from uninitialized VFs */ if (from_vf && !enable) - return -EINVAL; + return NPC_FLOW_VF_NOT_INIT; /* PF sets VF mac & VF NIXLF is not attached, update the mac addr */ if (pf_set_vfs_mac && !enable) { @@ -1221,15 +1240,12 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, return 0; } - /* If message is from VF then its flow should not overlap with - * reserved unicast flow. - */ - if (from_vf && pfvf->def_ucast_rule && is_npc_intf_rx(req->intf) && - pfvf->def_ucast_rule->features & req->features) - return -EINVAL; + mutex_lock(&rswitch->switch_lock); + err = npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, + req, rsp, enable, pf_set_vfs_mac); + mutex_unlock(&rswitch->switch_lock); - return npc_install_flow(rvu, blkaddr, target, nixlf, pfvf, req, rsp, - enable, pf_set_vfs_mac); + return err; } static int npc_delete_flow(struct rvu *rvu, struct rvu_npc_mcam_rule *rule, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c index e266f0c49559..b3150f053291 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -33,8 +30,8 @@ static struct hw_reg_map txsch_reg_map[NIX_TXSCH_LVL_CNT] = { {NIX_TXSCH_LVL_SMQ, 2, 0xFFFF, {{0x0700, 0x0708}, {0x1400, 0x14C8} } }, {NIX_TXSCH_LVL_TL4, 3, 0xFFFF, {{0x0B00, 0x0B08}, {0x0B10, 0x0B18}, {0x1200, 0x12E0} } }, - {NIX_TXSCH_LVL_TL3, 3, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, - {0x1610, 0x1618} } }, + {NIX_TXSCH_LVL_TL3, 4, 0xFFFF, {{0x1000, 0x10E0}, {0x1600, 0x1608}, + {0x1610, 0x1618}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL2, 2, 0xFFFF, {{0x0E00, 0x0EE0}, {0x1700, 0x17B0} } }, {NIX_TXSCH_LVL_TL1, 1, 0xFFFF, {{0x0C00, 0x0D98} } }, }; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h index 8b01ef6e2c99..21f1ed4e222f 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_reg.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_REG_H @@ -53,7 +50,7 @@ #define RVU_AF_SMMU_TXN_REQ (0x6008) #define RVU_AF_SMMU_ADDR_RSP_STS (0x6010) #define RVU_AF_SMMU_ADDR_TLN (0x6018) -#define RVU_AF_SMMU_TLN_FLIT1 (0x6030) +#define RVU_AF_SMMU_TLN_FLIT0 (0x6020) /* Admin function's privileged PF/VF registers */ #define RVU_PRIV_CONST (0x8000000) @@ -156,6 +153,7 @@ #define NPA_AF_AQ_DONE_INT_W1S (0x0688) #define NPA_AF_AQ_DONE_ENA_W1S (0x0690) #define NPA_AF_AQ_DONE_ENA_W1C (0x0698) +#define NPA_AF_BATCH_CTL (0x06a0) #define NPA_AF_LFX_AURAS_CFG(a) (0x4000 | (a) << 18) #define NPA_AF_LFX_LOC_AURAS_BASE(a) (0x4010 | (a) << 18) #define NPA_AF_LFX_QINTS_CFG(a) (0x4100 | (a) << 18) @@ -265,10 +263,13 @@ #define NIX_AF_SDP_TX_FIFO_STATUS (0x0640) #define NIX_AF_TX_NPC_CAPTURE_CONFIG (0x0660) #define NIX_AF_TX_NPC_CAPTURE_INFO (0x0670) +#define NIX_AF_SEB_CFG (0x05F0) #define NIX_AF_DEBUG_NPC_RESP_DATAX(a) (0x680 | (a) << 3) #define NIX_AF_SMQX_CFG(a) (0x700 | (a) << 16) #define NIX_AF_SQM_DBG_CTL_STATUS (0x750) +#define NIX_AF_DWRR_SDP_MTU (0x790) +#define NIX_AF_DWRR_RPM_MTU (0x7A0) #define NIX_AF_PSE_CHANNEL_LEVEL (0x800) #define NIX_AF_PSE_SHAPER_CFG (0x810) #define NIX_AF_TX_EXPR_CREDIT (0x830) @@ -701,5 +702,8 @@ #define APR_AF_LMT_CFG (0x000ull) #define APR_AF_LMT_MAP_BASE (0x008ull) #define APR_AF_LMT_CTL (0x010ull) +#define APR_LMT_MAP_ENT_DIS_SCH_CMP_SHIFT 23 +#define APR_LMT_MAP_ENT_SCH_ENA_SHIFT 22 +#define APR_LMT_MAP_ENT_DIS_LINE_PREF_SHIFT 21 #endif /* RVU_REG_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c new file mode 100644 index 000000000000..b04fb226f708 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_sdp.c @@ -0,0 +1,108 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. 
+ * + */ + +#include <linux/pci.h> +#include "rvu.h" + +/* SDP PF device id */ +#define PCI_DEVID_OTX2_SDP_PF 0xA0F6 + +/* Maximum SDP blocks in a chip */ +#define MAX_SDP 2 + +/* SDP PF number */ +static int sdp_pf_num[MAX_SDP] = {-1, -1}; + +bool is_sdp_pfvf(u16 pcifunc) +{ + u16 pf = rvu_get_pf(pcifunc); + u32 found = 0, i = 0; + + while (i < MAX_SDP) { + if (pf == sdp_pf_num[i]) + found = 1; + i++; + } + + if (!found) + return false; + + return true; +} + +bool is_sdp_pf(u16 pcifunc) +{ + return (is_sdp_pfvf(pcifunc) && + !(pcifunc & RVU_PFVF_FUNC_MASK)); +} + +bool is_sdp_vf(u16 pcifunc) +{ + return (is_sdp_pfvf(pcifunc) && + !!(pcifunc & RVU_PFVF_FUNC_MASK)); +} + +int rvu_sdp_init(struct rvu *rvu) +{ + struct pci_dev *pdev = NULL; + struct rvu_pfvf *pfvf; + u32 i = 0; + + while ((i < MAX_SDP) && (pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, + PCI_DEVID_OTX2_SDP_PF, + pdev)) != NULL) { + /* The RVU PF number is one less than bus number */ + sdp_pf_num[i] = pdev->bus->number - 1; + pfvf = &rvu->pf[sdp_pf_num[i]]; + + pfvf->sdp_info = devm_kzalloc(rvu->dev, + sizeof(struct sdp_node_info), + GFP_KERNEL); + if (!pfvf->sdp_info) + return -ENOMEM; + + dev_info(rvu->dev, "SDP PF number:%d\n", sdp_pf_num[i]); + + put_device(&pdev->dev); + i++; + } + + return 0; +} + +int +rvu_mbox_handler_set_sdp_chan_info(struct rvu *rvu, + struct sdp_chan_info_msg *req, + struct msg_rsp *rsp) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, req->hdr.pcifunc); + + memcpy(pfvf->sdp_info, &req->info, sizeof(struct sdp_node_info)); + dev_info(rvu->dev, "AF: SDP%d max_vfs %d num_pf_rings %d pf_srn %d\n", + req->info.node_id, req->info.max_vfs, req->info.num_pf_rings, + req->info.pf_srn); + return 0; +} + +int +rvu_mbox_handler_get_sdp_chan_info(struct rvu *rvu, struct msg_req *req, + struct sdp_get_chan_info_msg *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr; + + if (!hw->cap.programmable_chans) { + rsp->chan_base = NIX_CHAN_SDP_CH_START; + rsp->num_chan = NIX_CHAN_SDP_NUM_CHANS; + } else { + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); + rsp->chan_base = hw->sdp_chan_base; + rsp->num_chan = rvu_read64(rvu, blkaddr, NIX_AF_CONST1) & 0xFFFUL; + } + + return 0; +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h index 5bbe6727d11d..77ac96693f04 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * - * Copyright (C) 2018 Marvell International Ltd. + * Copyright (C) 2018 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef RVU_STRUCT_H diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c index 820adf390b8e..3392487f6b47 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver +/* Marvell RVU Admin Function driver * * Copyright (C) 2021 Marvell. 
+ * */ #include <linux/bitfield.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c index 56f90cf9c4c0..775fd4c35794 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Admin Function driver tracepoints +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #define CREATE_TRACE_POINTS diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h index 64aa7d350df1..28984d0e848a 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_trace.h @@ -1,7 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Admin Function driver tracepoints +/* Marvell RVU Admin Function driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #undef TRACE_SYSTEM @@ -14,6 +15,8 @@ #include <linux/tracepoint.h> #include <linux/pci.h> +#include "mbox.h" + TRACE_EVENT(otx2_msg_alloc, TP_PROTO(const struct pci_dev *pdev, u16 id, u64 size), TP_ARGS(pdev, id, size), @@ -25,8 +28,8 @@ TRACE_EVENT(otx2_msg_alloc, __entry->id = id; __entry->size = size; ), - TP_printk("[%s] msg:(0x%x) size:%lld\n", __get_str(dev), - __entry->id, __entry->size) + TP_printk("[%s] msg:(%s) size:%lld\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->size) ); TRACE_EVENT(otx2_msg_send, @@ -88,8 +91,8 @@ TRACE_EVENT(otx2_msg_process, __entry->id = id; __entry->err = err; ), - TP_printk("[%s] msg:(0x%x) error:%d\n", __get_str(dev), - __entry->id, __entry->err) + TP_printk("[%s] msg:(%s) error:%d\n", __get_str(dev), + otx2_mbox_id2name(__entry->id), __entry->err) ); #endif /* __RVU_TRACE_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile index 3254b02205ca..b92c267628b8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/nic/Makefile @@ -1,13 +1,14 @@ # SPDX-License-Identifier: GPL-2.0 # -# Makefile for Marvell's OcteonTX2 ethernet device drivers +# Makefile for Marvell's RVU Ethernet device drivers # obj-$(CONFIG_OCTEONTX2_PF) += rvu_nicpf.o obj-$(CONFIG_OCTEONTX2_VF) += rvu_nicvf.o rvu_nicpf-y := otx2_pf.o otx2_common.o otx2_txrx.o otx2_ethtool.o \ - otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o -rvu_nicvf-y := otx2_vf.o + otx2_ptp.o otx2_flows.o otx2_tc.o cn10k.o otx2_dmac_flt.o \ + otx2_devlink.o +rvu_nicvf-y := otx2_vf.o otx2_devlink.o ccflags-y += -I$(srctree)/drivers/net/ethernet/marvell/octeontx2/af diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c index 184de9466286..3cc76f14d2fd 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. * - * Copyright (C) 2020 Marvell. 
*/ #include "cn10k.h" @@ -92,8 +93,7 @@ int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - /* FIXME: set based on NIX_AF_DWRR_RPM_MTU*/ - aq->sq.smq_rr_weight = pfvf->netdev->mtu; + aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h index 1a1ae334477d..8ae96815865e 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k.h @@ -1,7 +1,8 @@ -/* SPDX-License-Identifier: GPL-2.0 - * Marvell OcteonTx2 RVU Ethernet driver +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2021 Marvell. * - * Copyright (C) 2020 Marvell. */ #ifndef CN10K_H @@ -9,6 +10,20 @@ #include "otx2_common.h" +static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) +{ + u32 weight; + + /* On OTx2, since AF returns DWRR_MTU as '1', this logic + * will work on those silicons as well. + */ + weight = mtu / pfvf->hw.dwrr_mtu; + if (mtu % pfvf->hw.dwrr_mtu) + weight += 1; + + return weight; +} + void cn10k_refill_pool_ptrs(void *dev, struct otx2_cq_queue *cq); void cn10k_sqe_flush(void *dev, struct otx2_snd_queue *sq, int size, int qidx); int cn10k_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 70fcc1fd962f..ce25c2744435 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/interrupt.h> @@ -208,7 +205,8 @@ int otx2_set_mac_address(struct net_device *netdev, void *p) if (!otx2_hw_set_mac_addr(pfvf, addr->sa_data)) { memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); /* update dmac field in vlan offload rule */ - if (pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) + if (netif_running(netdev) && + pfvf->flags & OTX2_FLAG_RX_VLAN_SUPPORT) otx2_install_rxvlan_offload_flow(pfvf); /* update dmac address in ntuple and DMAC filter list */ if (pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT) @@ -268,6 +266,7 @@ unlock: int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) { struct otx2_rss_info *rss = &pfvf->hw.rss_info; + struct nix_rss_flowkey_cfg_rsp *rsp; struct nix_rss_flowkey_cfg *req; int err; @@ -282,6 +281,18 @@ int otx2_set_flowkey_cfg(struct otx2_nic *pfvf) req->group = DEFAULT_RSS_CONTEXT_GROUP; err = otx2_sync_mbox_msg(&pfvf->mbox); + if (err) + goto fail; + + rsp = (struct nix_rss_flowkey_cfg_rsp *) + otx2_mbox_get_rsp(&pfvf->mbox.mbox, 0, &req->hdr); + if (IS_ERR(rsp)) { + err = PTR_ERR(rsp); + goto fail; + } + + pfvf->hw.flowkey_alg_idx = rsp->alg_idx; +fail: mutex_unlock(&pfvf->mbox.lock); return err; } @@ -572,30 +583,14 @@ void otx2_get_mac_from_af(struct net_device *netdev) } EXPORT_SYMBOL(otx2_get_mac_from_af); -static int otx2_get_link(struct otx2_nic *pfvf) -{ - int link = 0; - u16 map; - - /* cgx lmac link */ - if (pfvf->hw.tx_chan_base >= CGX_CHAN_BASE) { - map = pfvf->hw.tx_chan_base & 0x7FF; - link = 4 * ((map >> 8) & 0xF) + ((map >> 4) & 0xF); - } - /* LBK channel */ - if (pfvf->hw.tx_chan_base < SDP_CHAN_BASE) { - map = pfvf->hw.tx_chan_base & 0x7FF; - link = pfvf->hw.cgx_links | ((map >> 8) & 0xF); - } - - return link; -} - int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) { struct otx2_hw *hw = &pfvf->hw; struct nix_txschq_config *req; u64 schq, parent; + u64 dwrr_val; + + dwrr_val = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); req = otx2_mbox_alloc_msg_nix_txschq_cfg(&pfvf->mbox); if (!req) @@ -621,21 +616,21 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->num_regs++; /* Set DWRR quantum */ req->reg[2] = NIX_AF_MDQX_SCHEDULE(schq); - req->regval[2] = DFLT_RR_QTM; + req->regval[2] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL4) { parent = hw->txschq_list[NIX_TXSCH_LVL_TL3][0]; req->reg[0] = NIX_AF_TL4X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL4X_SCHEDULE(schq); - req->regval[1] = DFLT_RR_QTM; + req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL3) { parent = hw->txschq_list[NIX_TXSCH_LVL_TL2][0]; req->reg[0] = NIX_AF_TL3X_PARENT(schq); req->regval[0] = parent << 16; req->num_regs++; req->reg[1] = NIX_AF_TL3X_SCHEDULE(schq); - req->regval[1] = DFLT_RR_QTM; + req->regval[1] = dwrr_val; } else if (lvl == NIX_TXSCH_LVL_TL2) { parent = hw->txschq_list[NIX_TXSCH_LVL_TL1][0]; req->reg[0] = NIX_AF_TL2X_PARENT(schq); @@ -643,11 +638,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) req->num_regs++; req->reg[1] = NIX_AF_TL2X_SCHEDULE(schq); - req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | DFLT_RR_QTM; + req->regval[1] = TXSCH_TL1_DFLT_RR_PRIO << 24 | dwrr_val; req->num_regs++; - req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, - otx2_get_link(pfvf)); + req->reg[2] = NIX_AF_TL3_TL2X_LINKX_CFG(schq, hw->tx_link); /* Enable this queue and backpressure */ req->regval[2] = BIT_ULL(13) | BIT_ULL(12); @@ -656,7 +650,10 @@ int otx2_txschq_config(struct otx2_nic *pfvf, int lvl) * For VF this is always ignored. 
*/ - /* Set DWRR quantum */ + /* On CN10K, if RR_WEIGHT is greater than 16384, HW will + * clip it to 16384, so configuring a 24bit max value + * will work on both OTx2 and CN10K. + */ req->reg[0] = NIX_AF_TL1X_SCHEDULE(schq); req->regval[0] = TXSCH_TL1_DFLT_RR_QTM; @@ -803,7 +800,7 @@ int otx2_sq_aq_init(void *dev, u16 qidx, u16 sqb_aura) aq->sq.ena = 1; /* Only one SMQ is allocated, map all SQ's to that SMQ */ aq->sq.smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0]; - aq->sq.smq_rr_quantum = DFLT_RR_QTM; + aq->sq.smq_rr_quantum = mtu_to_dwrr_weight(pfvf, pfvf->max_frs); aq->sq.default_chan = pfvf->hw.tx_chan_base; aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */ aq->sq.sqb_aura = sqb_aura; @@ -1190,7 +1187,22 @@ static int otx2_aura_init(struct otx2_nic *pfvf, int aura_id, /* Enable backpressure for RQ aura */ if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) { aq->aura.bp_ena = 0; + /* If NIX1 LF is attached then specify NIX1_RX. + * + * Below NPA_AURA_S[BP_ENA] is set according to the + * NPA_BPINTF_E enumeration given as: + * 0x0 + a*0x1 where 'a' is 0 for NIX0_RX and 1 for NIX1_RX so + * NIX0_RX is 0x0 + 0*0x1 = 0 + * NIX1_RX is 0x0 + 1*0x1 = 1 + * But in HRM it is given that + * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to + * NIX-RX based on [BP] level. One bit per NIX-RX; index + * enumerated by NPA_BPINTF_E." + */ + if (pfvf->nix_blkaddr == BLKADDR_NIX1) + aq->aura.bp_ena = 1; aq->aura.nix0_bpid = pfvf->bpid[0]; + /* Set backpressure level for RQ's Aura */ aq->aura.bp = RQ_BP_LVL_AURA; } @@ -1577,6 +1589,7 @@ void mbox_handler_nix_lf_alloc(struct otx2_nic *pfvf, pfvf->hw.lso_tsov6_idx = rsp->lso_tsov6_idx; pfvf->hw.cgx_links = rsp->cgx_links; pfvf->hw.lbk_links = rsp->lbk_links; + pfvf->hw.tx_link = rsp->tx_link; } EXPORT_SYMBOL(mbox_handler_nix_lf_alloc); @@ -1668,6 +1681,11 @@ u16 otx2_get_max_mtu(struct otx2_nic *pfvf) * SMQ errors */ max_mtu = rsp->max_mtu - 8 - OTX2_ETH_HLEN; + + /* Also save DWRR MTU, needed for DWRR weight calculation */ + pfvf->hw.dwrr_mtu = rsp->rpm_dwrr_mtu; + if (!pfvf->hw.dwrr_mtu) + pfvf->hw.dwrr_mtu = 1; } out: diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h index 8fd58cd07f50..48227cec06ee 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #ifndef OTX2_COMMON_H @@ -19,11 +16,13 @@ #include <linux/timecounter.h> #include <linux/soc/marvell/octeontx2/asm.h> #include <net/pkt_cls.h> +#include <net/devlink.h> #include <mbox.h> #include <npc.h> #include "otx2_reg.h" #include "otx2_txrx.h" +#include "otx2_devlink.h" #include <rvu_trace.h> /* PCI device IDs */ @@ -181,6 +180,7 @@ struct otx2_hw { /* NIX */ u16 txschq_list[NIX_TXSCH_LVL_CNT][MAX_TXSCHQ_PER_FUNC]; u16 matchall_ipolicer; + u32 dwrr_mtu; /* HW settings, coalescing etc */ u16 rx_chan_base; @@ -196,6 +196,9 @@ struct otx2_hw { u8 lso_udpv4_idx; u8 lso_udpv6_idx; + /* RSS */ + u8 flowkey_alg_idx; + /* MSI-X */ u8 cint_cnt; /* CQ interrupt count */ u16 npa_msixoff; /* Offset of NPA vectors */ @@ -212,6 +215,7 @@ struct otx2_hw { u64 cgx_fec_uncorr_blks; u8 cgx_links; /* No. of CGX links present in HW */ u8 lbk_links; /* No. of LBK links present in HW */ + u8 tx_link; /* Transmit channel link number */ #define HW_TSO 0 #define CN10K_MBOX 1 #define CN10K_LMTST 2 @@ -267,7 +271,6 @@ struct otx2_mac_table { }; struct otx2_flow_config { - u16 entry[NPC_MAX_NONCONTIG_ENTRIES]; u16 *flow_ent; u16 *def_ent; u16 nr_flows; @@ -278,16 +281,13 @@ struct otx2_flow_config { #define OTX2_MCAM_COUNT (OTX2_DEFAULT_FLOWCOUNT + \ OTX2_MAX_UNICAST_FLOWS + \ OTX2_MAX_VLAN_FLOWS) - u16 ntuple_offset; u16 unicast_offset; u16 rx_vlan_offset; u16 vf_vlan_offset; #define OTX2_PER_VF_VLAN_FLOWS 2 /* Rx + Tx per VF */ #define OTX2_VF_VLAN_RX_INDEX 0 #define OTX2_VF_VLAN_TX_INDEX 1 - u16 tc_flower_offset; - u16 ntuple_max_flows; - u16 tc_max_flows; + u16 max_flows; u8 dmacflt_max_flows; u8 *bmap_to_dmacindex; unsigned long dmacflt_bmap; @@ -298,8 +298,7 @@ struct otx2_tc_info { /* hash table to store TC offloaded flows */ struct rhashtable flow_table; struct rhashtable_params flow_ht_params; - DECLARE_BITMAP(tc_entries_bitmap, OTX2_MAX_TC_FLOWS); - unsigned long num_entries; + unsigned long *tc_entries_bitmap; }; struct dev_hw_ops { @@ -352,6 +351,11 @@ struct otx2_nic { struct otx2_vf_config *vf_configs; struct cgx_link_user_info linfo; + /* NPC MCAM */ + struct otx2_flow_config *flow_cfg; + struct otx2_mac_table *mac_table; + struct otx2_tc_info tc_info; + u64 reset_count; struct work_struct reset_task; struct workqueue_struct *flr_wq; @@ -359,7 +363,6 @@ struct otx2_nic { struct refill_work *refill_wrk; struct workqueue_struct *otx2_wq; struct work_struct rx_mode_work; - struct otx2_mac_table *mac_table; /* Ethtool stuff */ u32 msg_enable; @@ -375,9 +378,10 @@ struct otx2_nic { struct otx2_ptp *ptp; struct hwtstamp_config tstamp; - struct otx2_flow_config *flow_cfg; - struct otx2_tc_info tc_info; unsigned long rq_bmap; + + /* Devlink */ + struct otx2_devlink *dl; }; static inline bool is_otx2_lbkvf(struct pci_dev *pdev) @@ -709,6 +713,11 @@ MBOX_UP_CGX_MESSAGES #define RVU_PFVF_FUNC_SHIFT 0 #define RVU_PFVF_FUNC_MASK 0x3FF +static inline bool is_otx2_vf(u16 pcifunc) +{ + return !!(pcifunc & RVU_PFVF_FUNC_MASK); +} + static inline int rvu_get_pf(u16 pcifunc) { return (pcifunc >> RVU_PFVF_PF_SHIFT) & RVU_PFVF_PF_MASK; @@ -814,7 +823,8 @@ int otx2_set_real_num_queues(struct net_device *netdev, int tx_queues, int rx_queues); /* MCAM filter related APIs */ int otx2_mcam_flow_init(struct otx2_nic *pf); -int otx2_alloc_mcam_entries(struct otx2_nic *pfvf); +int otx2vf_mcam_flow_init(struct otx2_nic *pfvf); +int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count); void otx2_mcam_flow_del(struct otx2_nic *pf); int otx2_destroy_ntuple_flows(struct otx2_nic *pf); int otx2_destroy_mcam_flows(struct 
otx2_nic *pfvf); @@ -825,8 +835,7 @@ int otx2_get_all_flows(struct otx2_nic *pfvf, int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc); int otx2_remove_flow(struct otx2_nic *pfvf, u32 location); -int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, - struct npc_install_flow_req *req); +int otx2_get_maxflows(struct otx2_flow_config *flow_cfg); void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id); int otx2_del_macfilter(struct net_device *netdev, const u8 *mac); int otx2_add_macfilter(struct net_device *netdev, const u8 *mac); @@ -838,6 +847,7 @@ int otx2_init_tc(struct otx2_nic *nic); void otx2_shutdown_tc(struct otx2_nic *nic); int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type, void *type_data); +int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic); /* CGX/RPM DMAC filters support */ int otx2_dmacflt_get_max_cnt(struct otx2_nic *pf); int otx2_dmacflt_add(struct otx2_nic *pf, const u8 *mac, u8 bit_pos); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c new file mode 100644 index 000000000000..7ac3ef2fa06a --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.c @@ -0,0 +1,156 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell RVU PF/VF Netdev Devlink + * + * Copyright (C) 2021 Marvell. + */ + +#include "otx2_common.h" + +/* Devlink Params APIs */ +static int otx2_dl_mcam_count_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + struct otx2_flow_config *flow_cfg; + + if (!pfvf->flow_cfg) { + NL_SET_ERR_MSG_MOD(extack, + "pfvf->flow_cfg not initialized"); + return -EINVAL; + } + + flow_cfg = pfvf->flow_cfg; + if (flow_cfg && flow_cfg->nr_flows) { + NL_SET_ERR_MSG_MOD(extack, + "Cannot modify count when there are active rules"); + return -EINVAL; + } + + return 0; +} + +static int otx2_dl_mcam_count_set(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (!pfvf->flow_cfg) + return 0; + + otx2_alloc_mcam_entries(pfvf, ctx->val.vu16); + otx2_tc_alloc_ent_bitmap(pfvf); + + return 0; +} + +static int otx2_dl_mcam_count_get(struct devlink *devlink, u32 id, + struct devlink_param_gset_ctx *ctx) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + struct otx2_flow_config *flow_cfg; + + if (!pfvf->flow_cfg) { + ctx->val.vu16 = 0; + return 0; + } + + flow_cfg = pfvf->flow_cfg; + ctx->val.vu16 = flow_cfg->max_flows; + + return 0; +} + +enum otx2_dl_param_id { + OTX2_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX, + OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, +}; + +static const struct devlink_param otx2_dl_params[] = { + DEVLINK_PARAM_DRIVER(OTX2_DEVLINK_PARAM_ID_MCAM_COUNT, + "mcam_count", DEVLINK_PARAM_TYPE_U16, + BIT(DEVLINK_PARAM_CMODE_RUNTIME), + otx2_dl_mcam_count_get, otx2_dl_mcam_count_set, + otx2_dl_mcam_count_validate), +}; + +/* Devlink OPs */ +static int otx2_devlink_info_get(struct devlink *devlink, + struct devlink_info_req *req, + struct netlink_ext_ack *extack) +{ + struct otx2_devlink *otx2_dl = devlink_priv(devlink); + struct otx2_nic *pfvf = otx2_dl->pfvf; + + if (is_otx2_vf(pfvf->pcifunc)) + return devlink_info_driver_name_put(req, "rvu_nicvf"); + + return devlink_info_driver_name_put(req, "rvu_nicpf"); +} 
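The otx2_register_dl() routine below, like the prestera change later in this series, follows the devlink API in which devlink_alloc() receives the parent struct device and devlink_register() takes only the devlink instance. A minimal sketch of that alloc/register/params ordering, reusing the otx2_devlink_ops and otx2_dl_params definitions from this file; my_devlink_setup and parent_dev are illustrative placeholders, not part of the patch:

static int my_devlink_setup(struct device *parent_dev)
{
	struct devlink *dl;
	int err;

	/* Parent device is supplied at allocation time. */
	dl = devlink_alloc(&otx2_devlink_ops, sizeof(struct otx2_devlink),
			   parent_dev);
	if (!dl)
		return -ENOMEM;

	/* devlink_register() no longer takes a struct device argument. */
	err = devlink_register(dl);
	if (err)
		goto err_free;

	err = devlink_params_register(dl, otx2_dl_params,
				      ARRAY_SIZE(otx2_dl_params));
	if (err)
		goto err_unregister;

	/* Make "mcam_count" visible to the devlink userspace tool. */
	devlink_params_publish(dl);
	return 0;

err_unregister:
	devlink_unregister(dl);
err_free:
	devlink_free(dl);
	return err;
}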
+ +static const struct devlink_ops otx2_devlink_ops = { + .info_get = otx2_devlink_info_get, +}; + +int otx2_register_dl(struct otx2_nic *pfvf) +{ + struct otx2_devlink *otx2_dl; + struct devlink *dl; + int err; + + dl = devlink_alloc(&otx2_devlink_ops, + sizeof(struct otx2_devlink), pfvf->dev); + if (!dl) { + dev_warn(pfvf->dev, "devlink_alloc failed\n"); + return -ENOMEM; + } + + err = devlink_register(dl); + if (err) { + dev_err(pfvf->dev, "devlink register failed with error %d\n", err); + devlink_free(dl); + return err; + } + + otx2_dl = devlink_priv(dl); + otx2_dl->dl = dl; + otx2_dl->pfvf = pfvf; + pfvf->dl = otx2_dl; + + err = devlink_params_register(dl, otx2_dl_params, + ARRAY_SIZE(otx2_dl_params)); + if (err) { + dev_err(pfvf->dev, + "devlink params register failed with error %d", err); + goto err_dl; + } + + devlink_params_publish(dl); + + return 0; + +err_dl: + devlink_unregister(dl); + devlink_free(dl); + return err; +} + +void otx2_unregister_dl(struct otx2_nic *pfvf) +{ + struct otx2_devlink *otx2_dl = pfvf->dl; + struct devlink *dl; + + if (!otx2_dl || !otx2_dl->dl) + return; + + dl = otx2_dl->dl; + + devlink_params_unregister(dl, otx2_dl_params, + ARRAY_SIZE(otx2_dl_params)); + + devlink_unregister(dl); + devlink_free(dl); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h new file mode 100644 index 000000000000..c7bd4f3c6c6b --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_devlink.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Marvell RVU PF/VF Netdev Devlink + * + * Copyright (C) 2021 Marvell. + * + */ + +#ifndef OTX2_DEVLINK_H +#define OTX2_DEVLINK_H + +struct otx2_devlink { + struct devlink *dl; + struct otx2_nic *pfvf; +}; + +/* Devlink APIs */ +int otx2_register_dl(struct otx2_nic *pfvf); +void otx2_unregister_dl(struct otx2_nic *pfvf); + +#endif /* RVU_DEVLINK_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c index 383a6b5cb698..2ec800f741d8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_dmac_flt.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. + * */ #include "otx2_common.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c index b906a0eb6e0d..799486c72177 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ethtool.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/pci.h> @@ -33,9 +30,6 @@ struct otx2_stat { .index = offsetof(struct otx2_dev_stats, stat) / sizeof(u64), \ } -/* Physical link config */ -#define OTX2_ETHTOOL_SUPPORTED_MODES 0x638CCBF //110001110001100110010111111 - enum link_mode { OTX2_MODE_SUPPORTED, OTX2_MODE_ADVERTISED @@ -415,7 +409,9 @@ static int otx2_set_ringparam(struct net_device *netdev, } static int otx2_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *cmd) + struct ethtool_coalesce *cmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; @@ -429,7 +425,9 @@ static int otx2_get_coalesce(struct net_device *netdev, } static int otx2_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct otx2_nic *pfvf = netdev_priv(netdev); struct otx2_hw *hw = &pfvf->hw; @@ -645,6 +643,7 @@ static int otx2_set_rss_hash_opts(struct otx2_nic *pfvf, static int otx2_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc, u32 *rules) { + bool ntuple = !!(dev->features & NETIF_F_NTUPLE); struct otx2_nic *pfvf = netdev_priv(dev); int ret = -EOPNOTSUPP; @@ -654,14 +653,18 @@ static int otx2_get_rxnfc(struct net_device *dev, ret = 0; break; case ETHTOOL_GRXCLSRLCNT: - nfc->rule_cnt = pfvf->flow_cfg->nr_flows; - ret = 0; + if (netif_running(dev) && ntuple) { + nfc->rule_cnt = pfvf->flow_cfg->nr_flows; + ret = 0; + } break; case ETHTOOL_GRXCLSRULE: - ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); + if (netif_running(dev) && ntuple) + ret = otx2_get_flow(pfvf, nfc, nfc->fs.location); break; case ETHTOOL_GRXCLSRLALL: - ret = otx2_get_all_flows(pfvf, nfc, rules); + if (netif_running(dev) && ntuple) + ret = otx2_get_all_flows(pfvf, nfc, rules); break; case ETHTOOL_GRXFH: return otx2_get_rss_hash_opts(pfvf, nfc); @@ -696,41 +699,6 @@ static int otx2_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) return ret; } -static int otx2vf_get_rxnfc(struct net_device *dev, - struct ethtool_rxnfc *nfc, u32 *rules) -{ - struct otx2_nic *pfvf = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (nfc->cmd) { - case ETHTOOL_GRXRINGS: - nfc->data = pfvf->hw.rx_queues; - ret = 0; - break; - case ETHTOOL_GRXFH: - return otx2_get_rss_hash_opts(pfvf, nfc); - default: - break; - } - return ret; -} - -static int otx2vf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *nfc) -{ - struct otx2_nic *pfvf = netdev_priv(dev); - int ret = -EOPNOTSUPP; - - switch (nfc->cmd) { - case ETHTOOL_SRXFH: - ret = otx2_set_rss_hash_opts(pfvf, nfc); - break; - default: - break; - } - - return ret; -} - static u32 otx2_get_rxfh_key_size(struct net_device *netdev) { struct otx2_nic *pfvf = netdev_priv(netdev); @@ -1116,8 +1084,6 @@ static void otx2_get_link_mode_info(u64 link_mode_bmap, }; u8 bit; - link_mode_bmap = link_mode_bmap & OTX2_ETHTOOL_SUPPORTED_MODES; - for_each_set_bit(bit, (unsigned long *)&link_mode_bmap, 27) { /* SGMII mode is set */ if (bit == 0) @@ -1357,8 +1323,8 @@ static const struct ethtool_ops otx2vf_ethtool_ops = { .get_sset_count = otx2vf_get_sset_count, .set_channels = otx2_set_channels, .get_channels = otx2_get_channels, - .get_rxnfc = otx2vf_get_rxnfc, - .set_rxnfc = otx2vf_set_rxnfc, + .get_rxnfc = otx2_get_rxnfc, + .set_rxnfc = otx2_set_rxnfc, .get_rxfh_key_size = otx2_get_rxfh_key_size, .get_rxfh_indir_size = otx2_get_rxfh_indir_size, .get_rxfh = otx2_get_rxfh, 
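The otx2_flows.c changes below allocate MCAM entries in batches and then sort the returned entry numbers, so that an ethtool ntuple rule at location N always maps to the Nth lowest hardware entry. A minimal sketch of that sorting step with the kernel sort() helper; sort_mcam_entries and the entries[] array are illustrative stand-ins for the flow_ent[] handling in the patch:

#include <linux/sort.h>
#include <linux/types.h>

/* Ascending-order comparison, as in the mcam_entry_cmp() added below. */
static int mcam_entry_cmp(const void *a, const void *b)
{
	return *(const u16 *)a - *(const u16 *)b;
}

/* Batched npc_mcam_alloc_entry requests may hand back non-sequential
 * entry numbers; sorting keeps rule locations and HW entries in sync.
 */
static void sort_mcam_entries(u16 *entries, int allocated)
{
	if (allocated)
		sort(entries, allocated, sizeof(entries[0]),
		     mcam_entry_cmp, NULL);
}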
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c index 4d9de525802d..77a13fb555fb 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_flows.c @@ -1,15 +1,19 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physical Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2020 Marvell. + * */ #include <net/ipv6.h> +#include <linux/sort.h> #include "otx2_common.h" #define OTX2_DEFAULT_ACTION 0x1 +static int otx2_mcam_entry_init(struct otx2_nic *pfvf); + struct otx2_flow { struct ethtool_rx_flow_spec flow_spec; struct list_head list; @@ -30,8 +34,7 @@ static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_ { devm_kfree(pfvf->dev, flow_cfg->flow_ent); flow_cfg->flow_ent = NULL; - flow_cfg->ntuple_max_flows = 0; - flow_cfg->tc_max_flows = 0; + flow_cfg->max_flows = 0; } static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) @@ -40,11 +43,11 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) struct npc_mcam_free_entry_req *req; int ent, err; - if (!flow_cfg->ntuple_max_flows) + if (!flow_cfg->max_flows) return 0; mutex_lock(&pfvf->mbox.lock); - for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) { + for (ent = 0; ent < flow_cfg->max_flows; ent++) { req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox); if (!req) break; @@ -61,7 +64,12 @@ static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf) return 0; } -static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) +static int mcam_entry_cmp(const void *a, const void *b) +{ + return *(u16 *)a - *(u16 *)b; +} + +int otx2_alloc_mcam_entries(struct otx2_nic *pfvf, u16 count) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_alloc_entry_req *req; @@ -76,8 +84,12 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count, sizeof(u16), GFP_KERNEL); - if (!flow_cfg->flow_ent) + if (!flow_cfg->flow_ent) { + netdev_err(pfvf->netdev, + "%s: Unable to allocate memory for flow entries\n", + __func__); return -ENOMEM; + } mutex_lock(&pfvf->mbox.lock); @@ -92,8 +104,14 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) req->contig = false; req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ? NPC_MAX_NONCONTIG_ENTRIES : count - allocated; - req->priority = NPC_MCAM_HIGHER_PRIO; - req->ref_entry = flow_cfg->def_ent[0]; + + /* Allocate higher priority entries for PFs, so that VF's entries + * will be on top of PF. + */ + if (!is_otx2_vf(pfvf->pcifunc)) { + req->priority = NPC_MCAM_HIGHER_PRIO; + req->ref_entry = flow_cfg->def_ent[0]; + } /* Send message to AF */ if (otx2_sync_mbox_msg(&pfvf->mbox)) @@ -114,22 +132,34 @@ static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count) break; } + /* Multiple MCAM entry alloc requests could result in non-sequential + * MCAM entries in the flow_ent[] array. Sort them in an ascending order, + * otherwise user installed ntuple filter index and MCAM entry index will + * not be in sync. 
+ */ + if (allocated) + sort(&flow_cfg->flow_ent[0], allocated, + sizeof(flow_cfg->flow_ent[0]), mcam_entry_cmp, NULL); + exit: mutex_unlock(&pfvf->mbox.lock); - flow_cfg->ntuple_offset = 0; - flow_cfg->ntuple_max_flows = allocated; - flow_cfg->tc_max_flows = allocated; + flow_cfg->max_flows = allocated; + + if (allocated) { + pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC; + pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; + } if (allocated != count) netdev_info(pfvf->netdev, - "Unable to allocate %d MCAM entries for ntuple, got %d\n", + "Unable to allocate %d MCAM entries, got only %d\n", count, allocated); - return allocated; } +EXPORT_SYMBOL(otx2_alloc_mcam_entries); -int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) +static int otx2_mcam_entry_init(struct otx2_nic *pfvf) { struct otx2_flow_config *flow_cfg = pfvf->flow_cfg; struct npc_mcam_alloc_entry_req *req; @@ -189,18 +219,35 @@ int otx2_alloc_mcam_entries(struct otx2_nic *pfvf) mutex_unlock(&pfvf->mbox.lock); /* Allocate entries for Ntuple filters */ - count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); + count = otx2_alloc_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT); if (count <= 0) { otx2_clear_ntuple_flow_info(pfvf, flow_cfg); return 0; } - pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT; pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT; return 0; } +int otx2vf_mcam_flow_init(struct otx2_nic *pfvf) +{ + struct otx2_flow_config *flow_cfg; + + pfvf->flow_cfg = devm_kzalloc(pfvf->dev, + sizeof(struct otx2_flow_config), + GFP_KERNEL); + if (!pfvf->flow_cfg) + return -ENOMEM; + + flow_cfg = pfvf->flow_cfg; + INIT_LIST_HEAD(&flow_cfg->flow_list); + flow_cfg->max_flows = 0; + + return 0; +} +EXPORT_SYMBOL(otx2vf_mcam_flow_init); + int otx2_mcam_flow_init(struct otx2_nic *pf) { int err; @@ -212,7 +259,10 @@ int otx2_mcam_flow_init(struct otx2_nic *pf) INIT_LIST_HEAD(&pf->flow_cfg->flow_list); - err = otx2_alloc_mcam_entries(pf); + /* Allocate bare minimum number of MCAM entries needed for + * unicast and ntuple filters. 
+ */ + err = otx2_mcam_entry_init(pf); if (err) return err; @@ -248,6 +298,7 @@ void otx2_mcam_flow_del(struct otx2_nic *pf) { otx2_destroy_mcam_flows(pf); } +EXPORT_SYMBOL(otx2_mcam_flow_del); /* On success adds mcam entry * On failure enable promisous mode @@ -379,15 +430,19 @@ static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow) list_add(&flow->list, head); } -static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) +int otx2_get_maxflows(struct otx2_flow_config *flow_cfg) { - if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows || + if (!flow_cfg) + return 0; + + if (flow_cfg->nr_flows == flow_cfg->max_flows || bitmap_weight(&flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows)) - return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows; + return flow_cfg->max_flows + flow_cfg->dmacflt_max_flows; else - return flow_cfg->ntuple_max_flows; + return flow_cfg->max_flows; } +EXPORT_SYMBOL(otx2_get_maxflows); int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc, u32 location) @@ -708,7 +763,7 @@ static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp, return 0; } -int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, +static int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, struct npc_install_flow_req *req) { struct ethhdr *eth_mask = &fsp->m_u.ether_spec; @@ -732,7 +787,7 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, ether_addr_copy(pmask->dmac, eth_mask->h_dest); req->features |= BIT_ULL(NPC_DMAC); } - if (eth_mask->h_proto) { + if (eth_hdr->h_proto) { memcpy(&pkt->etype, ð_hdr->h_proto, sizeof(pkt->etype)); memcpy(&pmask->etype, ð_mask->h_proto, @@ -764,14 +819,31 @@ int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp, return -EOPNOTSUPP; } if (fsp->flow_type & FLOW_EXT) { - if (fsp->m_ext.vlan_etype) - return -EINVAL; - if (fsp->m_ext.vlan_tci) { - if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK)) + u16 vlan_etype; + + if (fsp->m_ext.vlan_etype) { + /* Partial masks not supported */ + if (be16_to_cpu(fsp->m_ext.vlan_etype) != 0xFFFF) return -EINVAL; - if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID) + + vlan_etype = be16_to_cpu(fsp->h_ext.vlan_etype); + /* Only ETH_P_8021Q and ETH_P_802AD types supported */ + if (vlan_etype != ETH_P_8021Q && + vlan_etype != ETH_P_8021AD) return -EINVAL; + memcpy(&pkt->vlan_etype, &fsp->h_ext.vlan_etype, + sizeof(pkt->vlan_etype)); + memcpy(&pmask->vlan_etype, &fsp->m_ext.vlan_etype, + sizeof(pmask->vlan_etype)); + + if (vlan_etype == ETH_P_8021Q) + req->features |= BIT_ULL(NPC_VLAN_ETYPE_CTAG); + else + req->features |= BIT_ULL(NPC_VLAN_ETYPE_STAG); + } + + if (fsp->m_ext.vlan_tci) { memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci, sizeof(pkt->vlan_tci)); memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci, @@ -858,6 +930,7 @@ static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow) if (flow->flow_spec.flow_type & FLOW_RSS) { req->op = NIX_RX_ACTIONOP_RSS; req->index = flow->rss_ctx_id; + req->flow_key_alg = pfvf->hw.flowkey_alg_idx; } else { req->op = NIX_RX_ACTIONOP_UCAST; req->index = ethtool_get_flow_spec_ring(ring_cookie); @@ -894,7 +967,7 @@ static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf, pf_mac->entry = 0; pf_mac->dmac_filter = true; - pf_mac->location = pfvf->flow_cfg->ntuple_max_flows; + pf_mac->location = pfvf->flow_cfg->max_flows; memcpy(&pf_mac->flow_spec, &flow->flow_spec, sizeof(struct ethtool_rx_flow_spec)); pf_mac->flow_spec.location = pf_mac->location; @@ -923,6 +996,12 @@ int 
otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) int err = 0; u32 ring; + if (!flow_cfg->max_flows) { + netdev_err(pfvf->netdev, + "Ntuple rule count is 0, allocate and retry\n"); + return -EINVAL; + } + ring = ethtool_get_flow_spec_ring(fsp->ring_cookie); if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return -ENOMEM; @@ -939,6 +1018,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) if (!flow) return -ENOMEM; flow->location = fsp->location; + flow->entry = flow_cfg->flow_ent[flow->location]; new = true; } /* struct copy */ @@ -975,7 +1055,7 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) flow->dmac_filter = true; flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap, flow_cfg->dmacflt_max_flows); - fsp->location = flow_cfg->ntuple_max_flows + flow->entry; + fsp->location = flow_cfg->max_flows + flow->entry; flow->flow_spec.location = fsp->location; flow->location = fsp->location; @@ -983,19 +1063,20 @@ int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc) otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry); } else { - if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) { + if (flow->location >= pfvf->flow_cfg->max_flows) { netdev_warn(pfvf->netdev, "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n", flow->location, - flow_cfg->ntuple_max_flows - 1); + flow_cfg->max_flows - 1); err = -EINVAL; } else { - flow->entry = flow_cfg->flow_ent[flow->location]; err = otx2_add_flow_msg(pfvf, flow); } } if (err) { + if (err == MBOX_MSG_INVALID) + err = -EINVAL; if (new) kfree(flow); return err; @@ -1132,6 +1213,9 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT)) return 0; + if (!flow_cfg->max_flows) + return 0; + mutex_lock(&pfvf->mbox.lock); req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox); if (!req) { @@ -1140,7 +1224,7 @@ int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf) } req->start = flow_cfg->flow_ent[0]; - req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1]; + req->end = flow_cfg->flow_ent[flow_cfg->max_flows - 1]; err = otx2_sync_mbox_msg(&pfvf->mbox); mutex_unlock(&pfvf->mbox.lock); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 2c24944a4dba..2f2e8a3d7924 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physical Function ethernet driver +/* Marvell RVU Physical Function ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
*/ #include <linux/module.h> @@ -1787,17 +1784,10 @@ static netdev_tx_t otx2_xmit(struct sk_buff *skb, struct net_device *netdev) static netdev_features_t otx2_fix_features(struct net_device *dev, netdev_features_t features) { - /* check if n-tuple filters are ON */ - if ((features & NETIF_F_HW_TC) && (dev->features & NETIF_F_NTUPLE)) { - netdev_info(dev, "Disabling n-tuple filters\n"); - features &= ~NETIF_F_NTUPLE; - } - - /* check if tc hw offload is ON */ - if ((features & NETIF_F_NTUPLE) && (dev->features & NETIF_F_HW_TC)) { - netdev_info(dev, "Disabling TC hardware offload\n"); - features &= ~NETIF_F_HW_TC; - } + if (features & NETIF_F_HW_VLAN_CTAG_RX) + features |= NETIF_F_HW_VLAN_STAG_RX; + else + features &= ~NETIF_F_HW_VLAN_STAG_RX; return features; } @@ -1854,6 +1844,7 @@ static int otx2_set_features(struct net_device *netdev, netdev_features_t changed = features ^ netdev->features; bool ntuple = !!(features & NETIF_F_NTUPLE); struct otx2_nic *pf = netdev_priv(netdev); + bool tc = !!(features & NETIF_F_HW_TC); if ((changed & NETIF_F_LOOPBACK) && netif_running(netdev)) return otx2_cgx_config_loopback(pf, @@ -1866,12 +1857,42 @@ static int otx2_set_features(struct net_device *netdev, if ((changed & NETIF_F_NTUPLE) && !ntuple) otx2_destroy_ntuple_flows(pf); - if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && - pf->tc_info.num_entries) { + if ((changed & NETIF_F_NTUPLE) && ntuple) { + if (!pf->flow_cfg->max_flows) { + netdev_err(netdev, + "Can't enable NTUPLE, MCAM entries not allocated\n"); + return -EINVAL; + } + } + + if ((changed & NETIF_F_HW_TC) && tc) { + if (!pf->flow_cfg->max_flows) { + netdev_err(netdev, + "Can't enable TC, MCAM entries not allocated\n"); + return -EINVAL; + } + } + + if ((changed & NETIF_F_HW_TC) && !tc && + pf->flow_cfg && pf->flow_cfg->nr_flows) { netdev_err(netdev, "Can't disable TC hardware offload while flows are active\n"); return -EBUSY; } + if ((changed & NETIF_F_NTUPLE) && ntuple && + (netdev->features & NETIF_F_HW_TC) && !(changed & NETIF_F_HW_TC)) { + netdev_err(netdev, + "Can't enable NTUPLE when TC is active, disable TC and retry\n"); + return -EINVAL; + } + + if ((changed & NETIF_F_HW_TC) && tc && + (netdev->features & NETIF_F_NTUPLE) && !(changed & NETIF_F_NTUPLE)) { + netdev_err(netdev, + "Can't enable TC when NTUPLE is active, disable NTUPLE and retry\n"); + return -EINVAL; + } + return 0; } @@ -2331,7 +2352,7 @@ static const struct net_device_ops otx2_netdev_ops = { .ndo_set_features = otx2_set_features, .ndo_tx_timeout = otx2_tx_timeout, .ndo_get_stats64 = otx2_get_stats64, - .ndo_do_ioctl = otx2_ioctl, + .ndo_eth_ioctl = otx2_ioctl, .ndo_set_vf_mac = otx2_set_vf_mac, .ndo_set_vf_vlan = otx2_set_vf_vlan, .ndo_get_vf_config = otx2_get_vf_config, @@ -2569,8 +2590,6 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_GSO_UDP_L4); netdev->features |= netdev->hw_features; - netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; - err = otx2_mcam_flow_init(pf); if (err) goto err_ptp_destroy; @@ -2594,12 +2613,13 @@ static int otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pf->flags & OTX2_FLAG_TC_FLOWER_SUPPORT) netdev->hw_features |= NETIF_F_HW_TC; + netdev->hw_features |= NETIF_F_LOOPBACK | NETIF_F_RXALL; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2_netdev_ops; - /* MTU range: 64 - 9190 */ netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(pf); @@ -2619,6 +2639,10 @@ static int 
otx2_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_mcam_flow_del; + err = otx2_register_dl(pf); + if (err) + goto err_mcam_flow_del; + /* Initialize SR-IOV resources */ err = otx2_sriov_vfcfg_init(pf); if (err) @@ -2776,6 +2800,7 @@ static void otx2_remove(struct pci_dev *pdev) /* Disable link notifications */ otx2_cgx_config_linkevents(pf, false); + otx2_unregister_dl(pf); unregister_netdev(netdev); otx2_sriov_disable(pf->pdev); otx2_sriov_vfcfg_cleanup(pf); diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c index 56390a664517..ec9e49985c2c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.c @@ -1,7 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 PTP support for ethernet driver +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2020 Marvell. * - * Copyright (C) 2020 Marvell International Ltd. */ #include "otx2_common.h" diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h index 706d63a43ae1..6ff284211d7b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_ptp.h @@ -1,5 +1,9 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 PTP support for ethernet driver */ +/* Marvell RVU Ethernet driver + * + * Copyright (C) 2020 Marvell. + * + */ #ifndef OTX2_PTP_H #define OTX2_PTP_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h index f4fd72ee9a25..1b967eaf948b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_reg.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_REG_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index 1f49b3caf5d4..4bbd12ff26e6 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_STRUCT_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c index 972b202b9884..626961a41089 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c @@ -1,8 +1,10 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Physcial Function ethernet driver +/* Marvell RVU Ethernet driver * * Copyright (C) 2021 Marvell. 
+ * */ + #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/inetdevice.h> @@ -52,6 +54,29 @@ struct otx2_tc_flow { bool is_act_police; }; +int otx2_tc_alloc_ent_bitmap(struct otx2_nic *nic) +{ + struct otx2_tc_info *tc = &nic->tc_info; + + if (!nic->flow_cfg->max_flows || is_otx2_vf(nic->pcifunc)) + return 0; + + /* Max flows changed, free the existing bitmap */ + kfree(tc->tc_entries_bitmap); + + tc->tc_entries_bitmap = + kcalloc(BITS_TO_LONGS(nic->flow_cfg->max_flows), + sizeof(long), GFP_KERNEL); + if (!tc->tc_entries_bitmap) { + netdev_err(nic->netdev, + "Unable to alloc TC flow entries bitmap\n"); + return -ENOMEM; + } + + return 0; +} +EXPORT_SYMBOL(otx2_tc_alloc_ent_bitmap); + static void otx2_get_egress_burst_cfg(u32 burst, u32 *burst_exp, u32 *burst_mantissa) { @@ -485,8 +510,8 @@ static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node, match.key->vlan_priority << 13; vlan_tci_mask = match.mask->vlan_id | - match.key->vlan_dei << 12 | - match.key->vlan_priority << 13; + match.mask->vlan_dei << 12 | + match.mask->vlan_priority << 13; flow_spec->vlan_tci = htons(vlan_tci); flow_mask->vlan_tci = htons(vlan_tci_mask); @@ -596,6 +621,7 @@ static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry) static int otx2_tc_del_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { + struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *flow_node; int err; @@ -638,7 +664,7 @@ static int otx2_tc_del_flow(struct otx2_nic *nic, kfree_rcu(flow_node, rcu); clear_bit(flow_node->bitpos, tc_info->tc_entries_bitmap); - tc_info->num_entries--; + flow_cfg->nr_flows--; return 0; } @@ -647,6 +673,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, struct flow_cls_offload *tc_flow_cmd) { struct netlink_ext_ack *extack = tc_flow_cmd->common.extack; + struct otx2_flow_config *flow_cfg = nic->flow_cfg; struct otx2_tc_info *tc_info = &nic->tc_info; struct otx2_tc_flow *new_node, *old_node; struct npc_install_flow_req *req, dummy; @@ -655,9 +682,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT)) return -ENOMEM; - if (bitmap_full(tc_info->tc_entries_bitmap, nic->flow_cfg->tc_max_flows)) { + if (bitmap_full(tc_info->tc_entries_bitmap, flow_cfg->max_flows)) { NL_SET_ERR_MSG_MOD(extack, - "Not enough MCAM space to add the flow"); + "Free MCAM entry not available to add the flow"); return -ENOMEM; } @@ -695,10 +722,9 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, memcpy(req, &dummy, sizeof(struct npc_install_flow_req)); new_node->bitpos = find_first_zero_bit(tc_info->tc_entries_bitmap, - nic->flow_cfg->tc_max_flows); + flow_cfg->max_flows); req->channel = nic->hw.rx_chan_base; - req->entry = nic->flow_cfg->flow_ent[nic->flow_cfg->tc_flower_offset + - nic->flow_cfg->tc_max_flows - new_node->bitpos]; + req->entry = flow_cfg->flow_ent[flow_cfg->max_flows - new_node->bitpos - 1]; req->intf = NIX_INTF_RX; req->set_cntr = 1; new_node->entry = req->entry; @@ -723,7 +749,7 @@ static int otx2_tc_add_flow(struct otx2_nic *nic, } set_bit(new_node->bitpos, tc_info->tc_entries_bitmap); - tc_info->num_entries++; + flow_cfg->nr_flows++; return 0; @@ -1008,10 +1034,21 @@ static const struct rhashtable_params tc_flow_ht_params = { int otx2_init_tc(struct otx2_nic *nic) { struct otx2_tc_info *tc = &nic->tc_info; + int err; /* Exclude receive queue 0 being used for police action */ set_bit(0, &nic->rq_bmap); + if (!nic->flow_cfg) { + 
netdev_err(nic->netdev, + "Can't init TC, nic->flow_cfg is not setup\n"); + return -EINVAL; + } + + err = otx2_tc_alloc_ent_bitmap(nic); + if (err) + return err; + tc->flow_ht_params = tc_flow_ht_params; return rhashtable_init(&tc->flow_table, &tc->flow_ht_params); } @@ -1020,5 +1057,6 @@ void otx2_shutdown_tc(struct otx2_nic *nic) { struct otx2_tc_info *tc = &nic->tc_info; + kfree(tc->tc_entries_bitmap); rhashtable_destroy(&tc->flow_table); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 22ec03a618b1..f42b1d4e0c67 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -1,11 +1,8 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #include <linux/etherdevice.h> diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index 2f144e2cf436..869de5f59e73 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -1,11 +1,8 @@ /* SPDX-License-Identifier: GPL-2.0 */ -/* Marvell OcteonTx2 RVU Ethernet driver +/* Marvell RVU Ethernet driver * - * Copyright (C) 2020 Marvell International Ltd. + * Copyright (C) 2020 Marvell. * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. */ #ifndef OTX2_TXRX_H diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c index a8bee5aefec1..03b4ec630432 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_vf.c @@ -1,5 +1,9 @@ // SPDX-License-Identifier: GPL-2.0 -/* Marvell OcteonTx2 RVU Virtual Function ethernet driver */ +/* Marvell RVU Virtual Function ethernet driver + * + * Copyright (C) 2020 Marvell. 
+ * + */ #include <linux/etherdevice.h> #include <linux/module.h> @@ -464,6 +468,28 @@ static void otx2vf_reset_task(struct work_struct *work) rtnl_unlock(); } +static int otx2vf_set_features(struct net_device *netdev, + netdev_features_t features) +{ + netdev_features_t changed = features ^ netdev->features; + bool ntuple_enabled = !!(features & NETIF_F_NTUPLE); + struct otx2_nic *vf = netdev_priv(netdev); + + if (changed & NETIF_F_NTUPLE) { + if (!ntuple_enabled) { + otx2_mcam_flow_del(vf); + return 0; + } + + if (!otx2_get_maxflows(vf->flow_cfg)) { + netdev_err(netdev, + "Can't enable NTUPLE, MCAM entries not allocated\n"); + return -EINVAL; + } + } + return 0; +} + static const struct net_device_ops otx2vf_netdev_ops = { .ndo_open = otx2vf_open, .ndo_stop = otx2vf_stop, @@ -471,6 +497,7 @@ static const struct net_device_ops otx2vf_netdev_ops = { .ndo_set_rx_mode = otx2vf_set_rx_mode, .ndo_set_mac_address = otx2_set_mac_address, .ndo_change_mtu = otx2vf_change_mtu, + .ndo_set_features = otx2vf_set_features, .ndo_get_stats64 = otx2_get_stats64, .ndo_tx_timeout = otx2_tx_timeout, }; @@ -627,12 +654,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) NETIF_F_HW_VLAN_STAG_TX; netdev->features |= netdev->hw_features; + netdev->hw_features |= NETIF_F_NTUPLE; + netdev->hw_features |= NETIF_F_RXALL; + netdev->gso_max_segs = OTX2_MAX_GSO_SEGS; netdev->watchdog_timeo = OTX2_TX_TIMEOUT; netdev->netdev_ops = &otx2vf_netdev_ops; - /* MTU range: 68 - 9190 */ netdev->min_mtu = OTX2_MIN_MTU; netdev->max_mtu = otx2_get_max_mtu(vf); @@ -658,6 +687,14 @@ static int otx2vf_probe(struct pci_dev *pdev, const struct pci_device_id *id) otx2vf_set_ethtool_ops(netdev); + err = otx2vf_mcam_flow_init(vf); + if (err) + goto err_unreg_netdev; + + err = otx2_register_dl(vf); + if (err) + goto err_unreg_netdev; + /* Enable pause frames by default */ vf->flags |= OTX2_FLAG_RX_PAUSE_ENABLED; vf->flags |= OTX2_FLAG_TX_PAUSE_ENABLED; @@ -695,6 +732,7 @@ static void otx2vf_remove(struct pci_dev *pdev) vf = netdev_priv(netdev); cancel_work_sync(&vf->reset_task); + otx2_unregister_dl(vf); unregister_netdev(netdev); if (vf->otx2_wq) destroy_workqueue(vf->otx2_wq); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c index fa7a0682ad1e..68b442eb6d69 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.c @@ -390,11 +390,12 @@ static const struct devlink_ops prestera_dl_ops = { .trap_drop_counter_get = prestera_drop_counter_get, }; -struct prestera_switch *prestera_devlink_alloc(void) +struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev) { struct devlink *dl; - dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch)); + dl = devlink_alloc(&prestera_dl_ops, sizeof(struct prestera_switch), + dev->dev); return devlink_priv(dl); } @@ -411,7 +412,7 @@ int prestera_devlink_register(struct prestera_switch *sw) struct devlink *dl = priv_to_devlink(sw); int err; - err = devlink_register(dl, sw->dev->dev); + err = devlink_register(dl); if (err) { dev_err(prestera_dev(sw), "devlink_register failed: %d\n", err); return err; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h index 5d73aa9db897..cc34c3db13a2 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_devlink.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_devlink.h @@ -6,7 
+6,7 @@ #include "prestera.h" -struct prestera_switch *prestera_devlink_alloc(void); +struct prestera_switch *prestera_devlink_alloc(struct prestera_device *dev); void prestera_devlink_free(struct prestera_switch *sw); int prestera_devlink_register(struct prestera_switch *sw); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_main.c b/drivers/net/ethernet/marvell/prestera/prestera_main.c index 226f4ff29f6e..44c670807fb3 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_main.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_main.c @@ -746,7 +746,8 @@ static int prestera_netdev_port_event(struct net_device *lower, case NETDEV_CHANGEUPPER: if (netif_is_bridge_master(upper)) { if (info->linking) - return prestera_bridge_port_join(upper, port); + return prestera_bridge_port_join(upper, port, + extack); else prestera_bridge_port_leave(upper, port); } else if (netif_is_lag_master(upper)) { @@ -904,7 +905,7 @@ int prestera_device_register(struct prestera_device *dev) struct prestera_switch *sw; int err; - sw = prestera_devlink_alloc(); + sw = prestera_devlink_alloc(dev); if (!sw) return -ENOMEM; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c index 9a309169dbae..3ce6ccd0f539 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.c @@ -480,7 +480,8 @@ err_port_flood_set: } int prestera_bridge_port_join(struct net_device *br_dev, - struct prestera_port *port) + struct prestera_port *port, + struct netlink_ext_ack *extack) { struct prestera_switchdev *swdev = port->sw->swdev; struct prestera_bridge_port *br_port; @@ -500,6 +501,11 @@ int prestera_bridge_port_join(struct net_device *br_dev, goto err_brport_create; } + err = switchdev_bridge_port_offload(br_port->dev, port->dev, NULL, + NULL, NULL, false, extack); + if (err) + goto err_switchdev_offload; + if (bridge->vlan_enabled) return 0; @@ -510,6 +516,8 @@ int prestera_bridge_port_join(struct net_device *br_dev, return 0; err_port_join: + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); +err_switchdev_offload: prestera_bridge_port_put(br_port); err_brport_create: prestera_bridge_put(bridge); @@ -584,6 +592,8 @@ void prestera_bridge_port_leave(struct net_device *br_dev, else prestera_bridge_1d_port_leave(br_port); + switchdev_bridge_port_unoffload(br_port->dev, NULL, NULL, NULL); + prestera_hw_port_learning_set(port, false); prestera_hw_port_flood_set(port, BR_FLOOD | BR_MCAST_FLOOD, 0); prestera_port_vid_stp_set(port, PRESTERA_VID_ALL, BR_STATE_FORWARDING); diff --git a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h index a91bc35d235f..0e93fda3d9a5 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h +++ b/drivers/net/ethernet/marvell/prestera/prestera_switchdev.h @@ -8,7 +8,8 @@ int prestera_switchdev_init(struct prestera_switch *sw); void prestera_switchdev_fini(struct prestera_switch *sw); int prestera_bridge_port_join(struct net_device *br_dev, - struct prestera_port *port); + struct prestera_port *port, + struct netlink_ext_ack *extack); void prestera_bridge_port_leave(struct net_device *br_dev, struct prestera_port *port); diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c index 9b48ae4bac39..fab53c9b8380 100644 --- a/drivers/net/ethernet/marvell/pxa168_eth.c +++ 
b/drivers/net/ethernet/marvell/pxa168_eth.c @@ -1377,7 +1377,7 @@ static const struct net_device_ops pxa168_eth_netdev_ops = { .ndo_set_rx_mode = pxa168_eth_set_rx_mode, .ndo_set_mac_address = pxa168_eth_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_change_mtu = pxa168_eth_change_mtu, .ndo_tx_timeout = pxa168_eth_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index d4bb27ba1419..051dd3fb5b03 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -615,7 +615,9 @@ static inline u32 skge_usecs2clk(const struct skge_hw *hw, u32 usec) } static int skge_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; @@ -639,7 +641,9 @@ static int skge_get_coalesce(struct net_device *dev, /* Note: interrupt timer is per board, but can turn on/off per port */ static int skge_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct skge_port *skge = netdev_priv(dev); struct skge_hw *hw = skge->hw; @@ -3787,7 +3791,7 @@ static const struct net_device_ops skge_netdev_ops = { .ndo_open = skge_up, .ndo_stop = skge_down, .ndo_start_xmit = skge_xmit_frame, - .ndo_do_ioctl = skge_ioctl, + .ndo_eth_ioctl = skge_ioctl, .ndo_get_stats = skge_get_stats, .ndo_tx_timeout = skge_tx_timeout, .ndo_change_mtu = skge_change_mtu, diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 8b8bff59c8fe..e9fc74e54b22 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -4052,7 +4052,9 @@ static int sky2_set_pauseparam(struct net_device *dev, } static int sky2_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -4087,7 +4089,9 @@ static int sky2_get_coalesce(struct net_device *dev, /* Note: this affect both ports */ static int sky2_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sky2_port *sky2 = netdev_priv(dev); struct sky2_hw *hw = sky2->hw; @@ -4693,7 +4697,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_open = sky2_open, .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, - .ndo_do_ioctl = sky2_ioctl, + .ndo_eth_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_rx_mode = sky2_set_multicast, @@ -4710,7 +4714,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = { .ndo_open = sky2_open, .ndo_stop = sky2_close, .ndo_start_xmit = sky2_xmit_frame, - .ndo_do_ioctl = sky2_ioctl, + .ndo_eth_ioctl = sky2_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = sky2_set_mac_address, .ndo_set_rx_mode = sky2_set_multicast, @@ -4884,7 +4888,7 @@ static int sky2_test_msi(struct sky2_hw *hw) /* This driver supports yukon2 chipset only */ static const char *sky2_name(u8 
chipid, char *buf, int sz) { - const char *name[] = { + static const char *const name[] = { "XL", /* 0xb3 */ "EC Ultra", /* 0xb4 */ "Extreme", /* 0xb5 */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 64adfd24e134..398c23cec815 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -2933,7 +2933,7 @@ static const struct net_device_ops mtk_netdev_ops = { .ndo_start_xmit = mtk_start_xmit, .ndo_set_mac_address = mtk_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mtk_do_ioctl, + .ndo_eth_ioctl = mtk_do_ioctl, .ndo_change_mtu = mtk_change_mtu, .ndo_tx_timeout = mtk_tx_timeout, .ndo_get_stats64 = mtk_get_stats64, diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 96d2891f1675..1d5dd2015453 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1162,7 +1162,7 @@ static const struct net_device_ops mtk_star_netdev_ops = { .ndo_start_xmit = mtk_star_netdev_start_xmit, .ndo_get_stats64 = mtk_star_netdev_get_stats64, .ndo_set_rx_mode = mtk_star_set_rx_mode, - .ndo_do_ioctl = mtk_star_netdev_ioctl, + .ndo_eth_ioctl = mtk_star_netdev_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/mellanox/mlx4/Kconfig b/drivers/net/ethernet/mellanox/mlx4/Kconfig index 400e611ba041..1b4b1f642317 100644 --- a/drivers/net/ethernet/mellanox/mlx4/Kconfig +++ b/drivers/net/ethernet/mellanox/mlx4/Kconfig @@ -6,8 +6,8 @@ config MLX4_EN tristate "Mellanox Technologies 1/10/40Gbit Ethernet support" depends on PCI && NETDEVICES && ETHERNET && INET + depends on PTP_1588_CLOCK_OPTIONAL select MLX4_CORE - imply PTP_1588_CLOCK help This driver supports Mellanox Technologies ConnectX Ethernet devices. 
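Two API conversions recur across the driver hunks in this range: .ndo_do_ioctl handlers move to .ndo_eth_ioctl, and ethtool get_coalesce/set_coalesce callbacks gain two extra parameters (struct kernel_ethtool_coalesce and struct netlink_ext_ack). The sketch below is illustrative only; "foo" is a hypothetical driver, not one of the files touched in this series, and it simply assumes the prototypes the converted drivers above now use (phy_do_ioctl() already matches the ndo_eth_ioctl signature, which is why drivers such as pxa168_eth can assign it directly).

/* Minimal sketch of the new prototypes; "foo" is hypothetical and not part
 * of this series. Real drivers read and program hardware where noted.
 */
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>
#include <linux/phy.h>

static int foo_get_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	/* Report the current interrupt moderation; values are placeholders. */
	ec->rx_coalesce_usecs = 50;
	ec->tx_coalesce_usecs = 50;
	return 0;
}

static int foo_set_coalesce(struct net_device *dev,
			    struct ethtool_coalesce *ec,
			    struct kernel_ethtool_coalesce *kernel_coal,
			    struct netlink_ext_ack *extack)
{
	if (ec->rx_coalesce_usecs > 1000)
		return -EINVAL;	/* extack can also carry a message for netlink users */
	/* Program the device's moderation timer here. */
	return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
	.get_coalesce			= foo_get_coalesce,
	.set_coalesce			= foo_set_coalesce,
};

/* MII/PHY and hardware-timestamping ioctls are now routed through
 * .ndo_eth_ioctl; phy_do_ioctl() forwards them to the attached PHY.
 */
static const struct net_device_ops foo_netdev_ops = {
	.ndo_eth_ioctl	= phy_do_ioctl,
};

The point of the split, as far as these conversions show, is that SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG and hardware-timestamping requests can be dispatched straight to .ndo_eth_ioctl without passing through the generic .ndo_do_ioctl path, so the per-driver changes here are mechanical renames rather than behavioural changes.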
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c index 3616b77caa0a..ef518b1040f7 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c @@ -998,7 +998,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev, } static int mlx4_en_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); @@ -1020,7 +1022,9 @@ static int mlx4_en_get_coalesce(struct net_device *dev, } static int mlx4_en_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx4_en_priv *priv = netdev_priv(dev); diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 5d0c9c62382d..a2f61a87cef8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -2828,7 +2828,7 @@ static const struct net_device_ops mlx4_netdev_ops = { .ndo_set_mac_address = mlx4_en_set_mac, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = mlx4_en_change_mtu, - .ndo_do_ioctl = mlx4_en_ioctl, + .ndo_eth_ioctl = mlx4_en_ioctl, .ndo_tx_timeout = mlx4_en_tx_timeout, .ndo_vlan_rx_add_vid = mlx4_en_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = mlx4_en_vlan_rx_kill_vid, diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index 442991d91c15..7f6d3b82c29b 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -991,7 +991,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) * expense of more costly truesize accounting */ priv->frag_info[0].frag_stride = PAGE_SIZE; - priv->dma_dir = PCI_DMA_BIDIRECTIONAL; + priv->dma_dir = DMA_BIDIRECTIONAL; priv->rx_headroom = XDP_PACKET_HEADROOM; i = 1; } else { @@ -1021,7 +1021,7 @@ void mlx4_en_calc_rx_buf(struct net_device *dev) buf_size += frag_size; i++; } - priv->dma_dir = PCI_DMA_FROMDEVICE; + priv->dma_dir = DMA_FROM_DEVICE; priv->rx_headroom = 0; } diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 31b74bddb7cd..c56b9dba4c71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -297,12 +297,12 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, dma_unmap_single(priv->ddev, tx_info->map0_dma, tx_info->map0_byte_count, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else dma_unmap_page(priv->ddev, tx_info->map0_dma, tx_info->map0_byte_count, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); /* Optimize the common case when there are no wraparounds */ if (likely((void *)tx_desc + (tx_info->nr_txbb << LOG_TXBB_SIZE) <= end)) { @@ -311,7 +311,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, dma_unmap_page(priv->ddev, (dma_addr_t)be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } else { if ((void *)data >= end) @@ -325,7 +325,7 @@ u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv, dma_unmap_page(priv->ddev, (dma_addr_t)be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } } @@ -831,7 +831,7 @@ static bool mlx4_en_build_dma_wqe(struct mlx4_en_priv *priv, dma = dma_map_single(ddev, skb->data + 
lso_header_size, byte_count, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (dma_mapping_error(ddev, dma)) goto tx_drop_unmap; @@ -853,7 +853,7 @@ tx_drop_unmap: ++data; dma_unmap_page(ddev, (dma_addr_t)be64_to_cpu(data->addr), be32_to_cpu(data->byte_count), - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } return false; @@ -1170,7 +1170,7 @@ netdev_tx_t mlx4_en_xmit_frame(struct mlx4_en_rx_ring *rx_ring, tx_info->nr_bytes = max_t(unsigned int, length, ETH_ZLEN); dma_sync_single_range_for_device(priv->ddev, dma, frame->page_offset, - length, PCI_DMA_TODEVICE); + length, DMA_TO_DEVICE); data->addr = cpu_to_be64(dma + frame->page_offset); dma_wmb(); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 28ac4693da3c..5a6b0fcaf7f8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -3806,24 +3806,15 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data, pci_set_master(pdev); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); goto err_release_regions; } } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, aborting\n"); - goto err_release_regions; - } - } /* Allow large DMA segments, up to the firmware limit of 1 GB */ dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024); @@ -4005,7 +3996,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) printk_once(KERN_INFO "%s", mlx4_version); - devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv)); + devlink = devlink_alloc(&mlx4_devlink_ops, sizeof(*priv), &pdev->dev); if (!devlink) return -ENOMEM; priv = devlink_priv(devlink); @@ -4024,7 +4015,7 @@ static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id) mutex_init(&dev->persist->interface_state_mutex); mutex_init(&dev->persist->pci_status_mutex); - ret = devlink_register(devlink, &pdev->dev); + ret = devlink_register(devlink); if (ret) goto err_persist_free; ret = devlink_params_register(devlink, mlx4_devlink_params, diff --git a/drivers/net/ethernet/mellanox/mlx4/qp.c b/drivers/net/ethernet/mellanox/mlx4/qp.c index 427e7a31862c..b149e601f673 100644 --- a/drivers/net/ethernet/mellanox/mlx4/qp.c +++ b/drivers/net/ethernet/mellanox/mlx4/qp.c @@ -739,7 +739,7 @@ static void mlx4_cleanup_qp_zones(struct mlx4_dev *dev) int i; for (i = 0; - i < sizeof(qp_table->zones_uids)/sizeof(qp_table->zones_uids[0]); + i < ARRAY_SIZE(qp_table->zones_uids); i++) { struct mlx4_bitmap *bitmap = mlx4_zone_get_bitmap(qp_table->zones, @@ -917,7 +917,7 @@ int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, { int err; int i; - enum mlx4_qp_state states[] = { + static const enum mlx4_qp_state states[] = { MLX4_QP_STATE_RST, MLX4_QP_STATE_INIT, MLX4_QP_STATE_RTR, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig index e1a5a79e27c7..92056452a9e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig @@ -10,7 +10,7 @@ config MLX5_CORE select NET_DEVLINK depends on VXLAN || !VXLAN depends on MLXFW || !MLXFW - depends on PTP_1588_CLOCK || !PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL depends on PCI_HYPERV_INTERFACE || !PCI_HYPERV_INTERFACE help Core driver for low level functionality of the ConnectX-4 and diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index b5072a3a2585..63032cd6efb1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -15,14 +15,15 @@ mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ health.o mcg.o cq.o alloc.o port.o mr.o pd.o \ transobj.o vport.o sriov.o fs_cmd.o fs_core.o pci_irq.o \ fs_counters.o fs_ft_pool.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ - lib/devcom.o lib/pci_vsc.o lib/dm.o diag/fs_tracepoint.o \ + lib/devcom.o lib/pci_vsc.o lib/dm.o lib/fs_ttc.o diag/fs_tracepoint.o \ diag/fw_tracer.o diag/crdump.o devlink.o diag/rsc_dump.o \ fw_reset.o qos.o # # Netdev basic # -mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ +mlx5_core-$(CONFIG_MLX5_CORE_EN) += en/rqt.o en/tir.o en/rss.o en/rx_res.o \ + en/channels.o en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ en_selftest.o en/port.o en/monitor_stats.o en/health.o \ en/reporter_tx.o en/reporter_rx.o en/params.o en/xsk/pool.o \ @@ -43,19 +44,22 @@ mlx5_core-$(CONFIG_MLX5_CLS_ACT) += en_tc.o en/rep/tc.o en/rep/neigh.o \ lib/fs_chains.o en/tc_tun.o \ esw/indir_table.o en/tc_tun_encap.o \ en/tc_tun_vxlan.o en/tc_tun_gre.o en/tc_tun_geneve.o \ - en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o + en/tc_tun_mplsoudp.o diag/en_tc_tracepoint.o \ + en/tc/post_act.o mlx5_core-$(CONFIG_MLX5_TC_CT) += en/tc_ct.o +mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += en/tc/sample.o # # Core extra # mlx5_core-$(CONFIG_MLX5_ESWITCH) += eswitch.o eswitch_offloads.o eswitch_offloads_termtbl.o \ - ecpf.o rdma.o esw/legacy.o + ecpf.o rdma.o esw/legacy.o \ + esw/devlink_port.o esw/vporttbl.o esw/qos.o + mlx5_core-$(CONFIG_MLX5_ESWITCH) += esw/acl/helper.o \ esw/acl/egress_lgcy.o esw/acl/egress_ofld.o \ - esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o \ - esw/devlink_port.o esw/vporttbl.o -mlx5_core-$(CONFIG_MLX5_TC_SAMPLE) += esw/sample.o + esw/acl/ingress_lgcy.o esw/acl/ingress_ofld.o + mlx5_core-$(CONFIG_MLX5_BRIDGE) += esw/bridge.o en/rep/bridge.o mlx5_core-$(CONFIG_MLX5_MPFS) += lib/mpfs.o diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 9d79c5ec31e9..db5dfff585c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -877,7 +877,7 @@ static void cb_timeout_handler(struct work_struct *work) ent->ret = -ETIMEDOUT; mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. 
Will cause a leak of a command resource\n", ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); out: cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */ @@ -994,7 +994,7 @@ static void cmd_work_handler(struct work_struct *work) MLX5_SET(mbox_out, ent->out, status, status); MLX5_SET(mbox_out, ent->out, syndrome, drv_synd); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); return; } @@ -1008,7 +1008,7 @@ static void cmd_work_handler(struct work_struct *work) poll_timeout(ent); /* make sure we read the descriptor after ownership is SW */ rmb(); - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, (ent->ret == -ETIMEDOUT)); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, (ent->ret == -ETIMEDOUT)); } } @@ -1068,7 +1068,7 @@ static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in)); ent->ret = -ETIMEDOUT; - mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true); + mlx5_cmd_comp_handler(dev, 1ULL << ent->idx, true); } static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 360e093874d4..cf97985628ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -89,7 +89,8 @@ static void mlx5_add_cq_to_tasklet(struct mlx5_core_cq *cq, int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 *in, int inlen, u32 *out, int outlen) { - int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), c_eqn); + int eqn = MLX5_GET(cqc, MLX5_ADDR_OF(create_cq_in, in, cq_context), + c_eqn_or_apu_element); u32 din[MLX5_ST_SZ_DW(destroy_cq_in)] = {}; struct mlx5_eq_comp *eq; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index def2156e50ee..e8093c4e09d4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -53,7 +53,7 @@ static bool is_eth_rep_supported(struct mlx5_core_dev *dev) return true; } -static bool is_eth_supported(struct mlx5_core_dev *dev) +bool mlx5_eth_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_CORE_EN)) return false; @@ -105,7 +105,18 @@ static bool is_eth_supported(struct mlx5_core_dev *dev) return true; } -static bool is_vnet_supported(struct mlx5_core_dev *dev) +static bool is_eth_enabled(struct mlx5_core_dev *dev) +{ + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(priv_to_devlink(dev), + DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, + &val); + return err ? false : val.vbool; +} + +bool mlx5_vnet_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_VDPA_NET)) return false; @@ -127,6 +138,17 @@ static bool is_vnet_supported(struct mlx5_core_dev *dev) return true; } +static bool is_vnet_enabled(struct mlx5_core_dev *dev) +{ + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(priv_to_devlink(dev), + DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, + &val); + return err ? 
false : val.vbool; +} + static bool is_ib_rep_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) @@ -170,7 +192,7 @@ static bool is_mp_supported(struct mlx5_core_dev *dev) return true; } -static bool is_ib_supported(struct mlx5_core_dev *dev) +bool mlx5_rdma_supported(struct mlx5_core_dev *dev) { if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND)) return false; @@ -187,6 +209,17 @@ static bool is_ib_supported(struct mlx5_core_dev *dev) return true; } +static bool is_ib_enabled(struct mlx5_core_dev *dev) +{ + union devlink_param_value val; + int err; + + err = devlink_param_driverinit_value_get(priv_to_devlink(dev), + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + &val); + return err ? false : val.vbool; +} + enum { MLX5_INTERFACE_PROTOCOL_ETH, MLX5_INTERFACE_PROTOCOL_ETH_REP, @@ -201,13 +234,17 @@ enum { static const struct mlx5_adev_device { const char *suffix; bool (*is_supported)(struct mlx5_core_dev *dev); + bool (*is_enabled)(struct mlx5_core_dev *dev); } mlx5_adev_devices[] = { [MLX5_INTERFACE_PROTOCOL_VNET] = { .suffix = "vnet", - .is_supported = &is_vnet_supported }, + .is_supported = &mlx5_vnet_supported, + .is_enabled = &is_vnet_enabled }, [MLX5_INTERFACE_PROTOCOL_IB] = { .suffix = "rdma", - .is_supported = &is_ib_supported }, + .is_supported = &mlx5_rdma_supported, + .is_enabled = &is_ib_enabled }, [MLX5_INTERFACE_PROTOCOL_ETH] = { .suffix = "eth", - .is_supported = &is_eth_supported }, + .is_supported = &mlx5_eth_supported, + .is_enabled = &is_eth_enabled }, [MLX5_INTERFACE_PROTOCOL_ETH_REP] = { .suffix = "eth-rep", .is_supported = &is_eth_rep_supported }, [MLX5_INTERFACE_PROTOCOL_IB_REP] = { .suffix = "rdma-rep", @@ -308,6 +345,14 @@ int mlx5_attach_device(struct mlx5_core_dev *dev) if (!priv->adev[i]) { bool is_supported = false; + if (mlx5_adev_devices[i].is_enabled) { + bool enabled; + + enabled = mlx5_adev_devices[i].is_enabled(dev); + if (!enabled) + continue; + } + if (mlx5_adev_devices[i].is_supported) is_supported = mlx5_adev_devices[i].is_supported(dev); @@ -360,6 +405,14 @@ void mlx5_detach_device(struct mlx5_core_dev *dev) if (!priv->adev[i]) continue; + if (mlx5_adev_devices[i].is_enabled) { + bool enabled; + + enabled = mlx5_adev_devices[i].is_enabled(dev); + if (!enabled) + goto skip_suspend; + } + adev = &priv->adev[i]->adev; /* Auxiliary driver was unbind manually through sysfs */ if (!adev->dev.driver) @@ -397,7 +450,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev) void mlx5_unregister_device(struct mlx5_core_dev *dev) { mutex_lock(&mlx5_intf_mutex); - dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; + dev->priv.flags = MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV; mlx5_rescan_drivers_locked(dev); mutex_unlock(&mlx5_intf_mutex); } @@ -447,12 +500,21 @@ static void delete_drivers(struct mlx5_core_dev *dev) if (!priv->adev[i]) continue; + if (mlx5_adev_devices[i].is_enabled) { + bool enabled; + + enabled = mlx5_adev_devices[i].is_enabled(dev); + if (!enabled) + goto del_adev; + } + if (mlx5_adev_devices[i].is_supported && !delete_all) is_supported = mlx5_adev_devices[i].is_supported(dev); if (is_supported) continue; +del_adev: del_adev(&priv->adev[i]->adev); priv->adev[i] = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c index d791d351b489..e84287ffc7ce 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.c @@ -7,6 +7,7 @@ #include "fw_reset.h" #include "fs_core.h" #include "eswitch.h" +#include "esw/qos.h" 
#include "sf/dev/dev.h" #include "sf/sf.h" @@ -292,6 +293,13 @@ static const struct devlink_ops mlx5_devlink_ops = { .eswitch_encap_mode_get = mlx5_devlink_eswitch_encap_mode_get, .port_function_hw_addr_get = mlx5_devlink_port_function_hw_addr_get, .port_function_hw_addr_set = mlx5_devlink_port_function_hw_addr_set, + .rate_leaf_tx_share_set = mlx5_esw_devlink_rate_leaf_tx_share_set, + .rate_leaf_tx_max_set = mlx5_esw_devlink_rate_leaf_tx_max_set, + .rate_node_tx_share_set = mlx5_esw_devlink_rate_node_tx_share_set, + .rate_node_tx_max_set = mlx5_esw_devlink_rate_node_tx_max_set, + .rate_node_new = mlx5_esw_devlink_rate_node_new, + .rate_node_del = mlx5_esw_devlink_rate_node_del, + .rate_leaf_parent_set = mlx5_esw_devlink_rate_parent_set, #endif #ifdef CONFIG_MLX5_SF_MANAGER .port_new = mlx5_devlink_sf_port_new, @@ -359,9 +367,10 @@ int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id, return 0; } -struct devlink *mlx5_devlink_alloc(void) +struct devlink *mlx5_devlink_alloc(struct device *dev) { - return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev)); + return devlink_alloc(&mlx5_devlink_ops, sizeof(struct mlx5_core_dev), + dev); } void mlx5_devlink_free(struct devlink *devlink) @@ -595,6 +604,157 @@ static void mlx5_devlink_set_params_init_values(struct devlink *devlink) #endif } +static const struct devlink_param enable_eth_param = + DEVLINK_PARAM_GENERIC(ENABLE_ETH, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL); + +static int mlx5_devlink_eth_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!mlx5_eth_supported(dev)) + return 0; + + err = devlink_param_register(devlink, &enable_eth_param); + if (err) + return err; + + value.vbool = true; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_ETH, + value); + devlink_param_publish(devlink, &enable_eth_param); + return 0; +} + +static void mlx5_devlink_eth_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!mlx5_eth_supported(dev)) + return; + + devlink_param_unpublish(devlink, &enable_eth_param); + devlink_param_unregister(devlink, &enable_eth_param); +} + +static int mlx5_devlink_enable_rdma_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + bool new_state = val.vbool; + + if (new_state && !mlx5_rdma_supported(dev)) + return -EOPNOTSUPP; + return 0; +} + +static const struct devlink_param enable_rdma_param = + DEVLINK_PARAM_GENERIC(ENABLE_RDMA, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, mlx5_devlink_enable_rdma_validate); + +static int mlx5_devlink_rdma_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + return 0; + + err = devlink_param_register(devlink, &enable_rdma_param); + if (err) + return err; + + value.vbool = true; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_RDMA, + value); + devlink_param_publish(devlink, &enable_rdma_param); + return 0; +} + +static void mlx5_devlink_rdma_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!IS_ENABLED(CONFIG_MLX5_INFINIBAND) || MLX5_ESWITCH_MANAGER(dev)) + return; + + 
devlink_param_unpublish(devlink, &enable_rdma_param); + devlink_param_unregister(devlink, &enable_rdma_param); +} + +static const struct devlink_param enable_vnet_param = + DEVLINK_PARAM_GENERIC(ENABLE_VNET, BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, NULL); + +static int mlx5_devlink_vnet_param_register(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + union devlink_param_value value; + int err; + + if (!mlx5_vnet_supported(dev)) + return 0; + + err = devlink_param_register(devlink, &enable_vnet_param); + if (err) + return err; + + value.vbool = true; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_ENABLE_VNET, + value); + devlink_param_publish(devlink, &enable_rdma_param); + return 0; +} + +static void mlx5_devlink_vnet_param_unregister(struct devlink *devlink) +{ + struct mlx5_core_dev *dev = devlink_priv(devlink); + + if (!mlx5_vnet_supported(dev)) + return; + + devlink_param_unpublish(devlink, &enable_vnet_param); + devlink_param_unregister(devlink, &enable_vnet_param); +} + +static int mlx5_devlink_auxdev_params_register(struct devlink *devlink) +{ + int err; + + err = mlx5_devlink_eth_param_register(devlink); + if (err) + return err; + + err = mlx5_devlink_rdma_param_register(devlink); + if (err) + goto rdma_err; + + err = mlx5_devlink_vnet_param_register(devlink); + if (err) + goto vnet_err; + return 0; + +vnet_err: + mlx5_devlink_rdma_param_unregister(devlink); +rdma_err: + mlx5_devlink_eth_param_unregister(devlink); + return err; +} + +static void mlx5_devlink_auxdev_params_unregister(struct devlink *devlink) +{ + mlx5_devlink_vnet_param_unregister(devlink); + mlx5_devlink_rdma_param_unregister(devlink); + mlx5_devlink_eth_param_unregister(devlink); +} + #define MLX5_TRAP_DROP(_id, _group_id) \ DEVLINK_TRAP_GENERIC(DROP, DROP, _id, \ DEVLINK_TRAP_GROUP_GENERIC_ID_##_group_id, \ @@ -638,11 +798,11 @@ static void mlx5_devlink_traps_unregister(struct devlink *devlink) ARRAY_SIZE(mlx5_trap_groups_arr)); } -int mlx5_devlink_register(struct devlink *devlink, struct device *dev) +int mlx5_devlink_register(struct devlink *devlink) { int err; - err = devlink_register(devlink, dev); + err = devlink_register(devlink); if (err) return err; @@ -653,6 +813,10 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev) mlx5_devlink_set_params_init_values(devlink); devlink_params_publish(devlink); + err = mlx5_devlink_auxdev_params_register(devlink); + if (err) + goto auxdev_reg_err; + err = mlx5_devlink_traps_register(devlink); if (err) goto traps_reg_err; @@ -660,6 +824,8 @@ int mlx5_devlink_register(struct devlink *devlink, struct device *dev) return 0; traps_reg_err: + mlx5_devlink_auxdev_params_unregister(devlink); +auxdev_reg_err: devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); params_reg_err: @@ -670,6 +836,8 @@ params_reg_err: void mlx5_devlink_unregister(struct devlink *devlink) { mlx5_devlink_traps_unregister(devlink); + mlx5_devlink_auxdev_params_unregister(devlink); + devlink_params_unpublish(devlink); devlink_params_unregister(devlink, mlx5_devlink_params, ARRAY_SIZE(mlx5_devlink_params)); devlink_unregister(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h index 7318d44b774b..30bf4882779b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/devlink.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/devlink.h @@ -31,9 +31,9 @@ int mlx5_devlink_trap_get_num_active(struct mlx5_core_dev *dev); 
int mlx5_devlink_traps_get_action(struct mlx5_core_dev *dev, int trap_id, enum devlink_trap_action *action); -struct devlink *mlx5_devlink_alloc(void); +struct devlink *mlx5_devlink_alloc(struct device *dev); void mlx5_devlink_free(struct devlink *devlink); -int mlx5_devlink_register(struct devlink *devlink, struct device *dev); +int mlx5_devlink_register(struct devlink *devlink); void mlx5_devlink_unregister(struct devlink *devlink); #endif /* __MLX5_DEVLINK_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index b1b51bbba054..669a75f3537a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -58,6 +58,7 @@ #include "en/qos.h" #include "lib/hv_vhca.h" #include "lib/clock.h" +#include "en/rx_res.h" extern const struct net_device_ops mlx5e_netdev_ops; struct page_pool; @@ -65,14 +66,13 @@ struct page_pool; #define MLX5E_METADATA_ETHER_TYPE (0x8CE4) #define MLX5E_METADATA_ETHER_LEN 8 -#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v) - #define MLX5E_ETH_HARD_MTU (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) #define MLX5E_HW2SW_MTU(params, hwmtu) ((hwmtu) - ((params)->hard_mtu)) #define MLX5E_SW2HW_MTU(params, swmtu) ((swmtu) + ((params)->hard_mtu)) #define MLX5E_MAX_NUM_TC 8 +#define MLX5E_MAX_NUM_MQPRIO_CH_TC TC_QOPT_MAX_QUEUE #define MLX5_RX_HEADROOM NET_SKB_PAD #define MLX5_SKB_FRAG_SZ(len) (SKB_DATA_ALIGN(len) + \ @@ -126,7 +126,6 @@ struct page_pool; #define MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW 0x2 -#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) #define MLX5E_DEFAULT_LRO_TIMEOUT 32 #define MLX5E_LRO_TIMEOUT_ARR_SIZE 4 @@ -139,8 +138,6 @@ struct page_pool; #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES 0x80 #define MLX5E_PARAMS_DEFAULT_MIN_RX_WQES_MPW 0x2 -#define MLX5E_LOG_INDIR_RQT_SIZE 0x8 -#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE) #define MLX5E_MIN_NUM_CHANNELS 0x1 #define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE / 2) #define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC) @@ -252,7 +249,10 @@ struct mlx5e_params { u8 rq_wq_type; u8 log_rq_mtu_frames; u16 num_channels; - u8 num_tc; + struct { + u16 mode; + u8 num_tc; + } mqprio; bool rx_cqe_compress_def; bool tunneled_offload_en; struct dim_cq_moder rx_cq_moderation; @@ -272,6 +272,12 @@ struct mlx5e_params { bool ptp_rx; }; +static inline u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params) +{ + return params->mqprio.mode == TC_MQPRIO_MODE_DCB ? 
+ params->mqprio.num_tc : 1; +} + enum { MLX5E_RQ_STATE_ENABLED, MLX5E_RQ_STATE_RECOVERING, @@ -745,29 +751,11 @@ enum { MLX5E_STATE_XDP_ACTIVE, }; -struct mlx5e_rqt { - u32 rqtn; - bool enabled; -}; - -struct mlx5e_tir { - u32 tirn; - struct mlx5e_rqt rqt; - struct list_head list; -}; - enum { MLX5E_TC_PRIO = 0, MLX5E_NIC_PRIO }; -struct mlx5e_rss_params { - u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; - u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS]; - u8 toeplitz_hash_key[40]; - u8 hfunc; -}; - struct mlx5e_modify_sq_param { int curr_state; int next_state; @@ -837,13 +825,7 @@ struct mlx5e_priv { struct mlx5e_channels channels; u32 tisn[MLX5_MAX_PORTS][MLX5E_MAX_NUM_TC]; - struct mlx5e_rqt indir_rqt; - struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; - struct mlx5e_tir xsk_tir[MLX5E_MAX_NUM_CHANNELS]; - struct mlx5e_tir ptp_tir; - struct mlx5e_rss_params rss_params; + struct mlx5e_rx_res *rx_res; u32 tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; @@ -948,25 +930,6 @@ int mlx5e_vlan_rx_kill_vid(struct net_device *dev, __always_unused __be16 proto, u16 vid); void mlx5e_timestamp_init(struct mlx5e_priv *priv); -struct mlx5e_redirect_rqt_param { - bool is_rss; - union { - u32 rqn; /* Direct RQN (Non-RSS) */ - struct { - u8 hfunc; - struct mlx5e_channels *channels; - } rss; /* RSS data */ - }; -}; - -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, - struct mlx5e_redirect_rqt_param rrp); -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, - const struct mlx5e_tirc_config *ttconfig, - void *tirc, bool inner); -void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in); -struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt); - struct mlx5e_xsk_param; struct mlx5e_rq_param; @@ -1028,9 +991,6 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv); void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv); int mlx5e_ptp_rx_manage_fs_ctx(struct mlx5e_priv *priv, void *ctx); -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, - int num_channels); - int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state); void mlx5e_activate_rq(struct mlx5e_rq *rq); void mlx5e_deactivate_rq(struct mlx5e_rq *rq); @@ -1065,10 +1025,6 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev) extern const struct ethtool_ops mlx5e_ethtool_ops; -int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, - u32 *in); -void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir); int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev); void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev); int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb, @@ -1084,17 +1040,6 @@ void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq); int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node); void mlx5e_free_di_list(struct mlx5e_rq *rq); -int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv); - -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc); -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv); - -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n); -void mlx5e_destroy_direct_tirs(struct mlx5e_priv 
*priv, struct mlx5e_tir *tirs, int n); -void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt); - int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn); void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); @@ -1106,7 +1051,6 @@ int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); void mlx5e_queue_update_stats(struct mlx5e_priv *priv); -int mlx5e_bits_invert(unsigned long a, int size); int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); int mlx5e_set_dev_port_mtu_ctx(struct mlx5e_priv *priv, void *context); @@ -1183,8 +1127,6 @@ int mlx5e_netdev_change_profile(struct mlx5e_priv *priv, void mlx5e_netdev_attach_nic_profile(struct mlx5e_priv *priv); void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv); void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu); -void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, - u16 num_channels); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c new file mode 100644 index 000000000000..e7c14c0de0a7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.c @@ -0,0 +1,46 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */ + +#include "channels.h" +#include "en.h" +#include "en/ptp.h" + +unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs) +{ + return chs->num; +} + +void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +{ + struct mlx5e_channel *c; + + WARN_ON(ix >= mlx5e_channels_get_num(chs)); + c = chs->c[ix]; + + *rqn = c->rq.rqn; +} + +bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn) +{ + struct mlx5e_channel *c; + + WARN_ON(ix >= mlx5e_channels_get_num(chs)); + c = chs->c[ix]; + + if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + return false; + + *rqn = c->xskrq.rqn; + return true; +} + +bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn) +{ + struct mlx5e_ptp *c = chs->ptp; + + if (!c || !test_bit(MLX5E_PTP_STATE_RX, c->state)) + return false; + + *rqn = c->rq.rqn; + return true; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h new file mode 100644 index 000000000000..ca00cbc827cb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/channels.h @@ -0,0 +1,16 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_EN_CHANNELS_H__ +#define __MLX5_EN_CHANNELS_H__ + +#include <linux/kernel.h> + +struct mlx5e_channels; + +unsigned int mlx5e_channels_get_num(struct mlx5e_channels *chs); +void mlx5e_channels_get_regular_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); +bool mlx5e_channels_get_xsk_rqn(struct mlx5e_channels *chs, unsigned int ix, u32 *rqn); +bool mlx5e_channels_get_ptp_rqn(struct mlx5e_channels *chs, u32 *rqn); + +#endif /* __MLX5_EN_CHANNELS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c index bc33eaada3b9..86e079310ac3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/devlink.c @@ -55,19 +55,15 @@ void mlx5e_devlink_port_unregister(struct mlx5e_priv *priv) { struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); - if (dl_port->registered) - devlink_port_unregister(dl_port); + devlink_port_unregister(dl_port); } struct devlink_port *mlx5e_get_devlink_port(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); - struct devlink_port *port; if (!netif_device_present(dev)) return NULL; - port = mlx5e_devlink_get_dl_port(priv); - if (port->registered) - return port; - return NULL; + + return mlx5e_devlink_get_dl_port(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1d5ce07b83f4..41684a6c44e9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -5,6 +5,9 @@ #define __MLX5E_FLOW_STEER_H__ #include "mod_hdr.h" +#include "lib/fs_ttc.h" + +struct mlx5e_post_act; enum { MLX5E_TC_FT_LEVEL = 0, @@ -18,6 +21,7 @@ struct mlx5e_tc_table { struct mutex t_lock; struct mlx5_flow_table *t; struct mlx5_fs_chains *chains; + struct mlx5e_post_act *post_act; struct rhashtable ht; @@ -67,27 +71,7 @@ struct mlx5e_l2_table { bool promisc_enabled; }; -enum mlx5e_traffic_types { - MLX5E_TT_IPV4_TCP, - MLX5E_TT_IPV6_TCP, - MLX5E_TT_IPV4_UDP, - MLX5E_TT_IPV6_UDP, - MLX5E_TT_IPV4_IPSEC_AH, - MLX5E_TT_IPV6_IPSEC_AH, - MLX5E_TT_IPV4_IPSEC_ESP, - MLX5E_TT_IPV6_IPSEC_ESP, - MLX5E_TT_IPV4, - MLX5E_TT_IPV6, - MLX5E_TT_ANY, - MLX5E_NUM_TT, - MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, -}; - -struct mlx5e_tirc_config { - u8 l3_prot_type; - u8 l4_prot_type; - u32 rx_hash_fields; -}; +#define MLX5E_NUM_INDIR_TIRS (MLX5_NUM_TT - 1) #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ MLX5_HASH_FIELD_SEL_DST_IP) @@ -99,30 +83,6 @@ struct mlx5e_tirc_config { MLX5_HASH_FIELD_SEL_DST_IP |\ MLX5_HASH_FIELD_SEL_IPSEC_SPI) -enum mlx5e_tunnel_types { - MLX5E_TT_IPV4_GRE, - MLX5E_TT_IPV6_GRE, - MLX5E_TT_IPV4_IPIP, - MLX5E_TT_IPV6_IPIP, - MLX5E_TT_IPV4_IPV6, - MLX5E_TT_IPV6_IPV6, - MLX5E_NUM_TUNNEL_TT, -}; - -bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); - -struct mlx5e_ttc_rule { - struct mlx5_flow_handle *rule; - struct mlx5_flow_destination default_dest; -}; - -/* L3/L4 traffic type classifier */ -struct mlx5e_ttc_table { - struct mlx5e_flow_table ft; - struct mlx5e_ttc_rule rules[MLX5E_NUM_TT]; - struct mlx5_flow_handle *tunnel_rules[MLX5E_NUM_TUNNEL_TT]; -}; - /* NIC prio FTS */ enum { MLX5E_PROMISC_FT_LEVEL, @@ -144,21 +104,7 @@ enum { #endif }; -#define MLX5E_TTC_NUM_GROUPS 3 -#define MLX5E_TTC_GROUP1_SIZE (BIT(3) + MLX5E_NUM_TUNNEL_TT) -#define MLX5E_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_TTC_TABLE_SIZE (MLX5E_TTC_GROUP1_SIZE +\ - 
MLX5E_TTC_GROUP2_SIZE +\ - MLX5E_TTC_GROUP3_SIZE) - -#define MLX5E_INNER_TTC_NUM_GROUPS 3 -#define MLX5E_INNER_TTC_GROUP1_SIZE BIT(3) -#define MLX5E_INNER_TTC_GROUP2_SIZE BIT(1) -#define MLX5E_INNER_TTC_GROUP3_SIZE BIT(0) -#define MLX5E_INNER_TTC_TABLE_SIZE (MLX5E_INNER_TTC_GROUP1_SIZE +\ - MLX5E_INNER_TTC_GROUP2_SIZE +\ - MLX5E_INNER_TTC_GROUP3_SIZE) +struct mlx5e_priv; #ifdef CONFIG_MLX5_EN_RXNFC @@ -226,8 +172,8 @@ struct mlx5e_flow_steering { struct mlx5e_promisc_table promisc; struct mlx5e_vlan_table *vlan; struct mlx5e_l2_table l2; - struct mlx5e_ttc_table ttc; - struct mlx5e_ttc_table inner_ttc; + struct mlx5_ttc_table *ttc; + struct mlx5_ttc_table *inner_ttc; #ifdef CONFIG_MLX5_EN_ARFS struct mlx5e_arfs_tables *arfs; #endif @@ -239,33 +185,13 @@ struct mlx5e_flow_steering { struct mlx5e_ptp_fs *ptp_fs; }; -struct ttc_params { - struct mlx5_flow_table_attr ft_attr; - u32 any_tt_tirn; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_ttc_table *inner_ttc; -}; - -void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, struct ttc_params *ttc_params); -void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params); -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params); - -int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc); -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc); +void mlx5e_set_ttc_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params, bool tunnel); -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc); -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc); +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv); +int mlx5e_create_ttc_table(struct mlx5e_priv *priv); void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft); -int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, - struct mlx5_flow_destination *new_dest); -struct mlx5_flow_destination -mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type); -int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type); void mlx5e_enable_cvlan_filter(struct mlx5e_priv *priv); void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); @@ -273,7 +199,6 @@ void mlx5e_disable_cvlan_filter(struct mlx5e_priv *priv); int mlx5e_create_flow_steering(struct mlx5e_priv *priv); void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv); -u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt); int mlx5e_add_vlan_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); void mlx5e_remove_vlan_trap(struct mlx5e_priv *priv); int mlx5e_add_mac_trap(struct mlx5e_priv *priv, int trap_id, int tir_num); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c index 909faa6c89d7..7aa25a5e29d7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c @@ -33,22 +33,22 @@ static char *fs_udp_type2str(enum fs_udp_type i) } } -static enum mlx5e_traffic_types fs_udp2tt(enum fs_udp_type i) +static enum mlx5_traffic_types fs_udp2tt(enum fs_udp_type i) { switch (i) { case FS_IPV4_UDP: - return MLX5E_TT_IPV4_UDP; + return MLX5_TT_IPV4_UDP; default: /* FS_IPV6_UDP */ - return MLX5E_TT_IPV6_UDP; + return MLX5_TT_IPV6_UDP; } } -static enum fs_udp_type tt2fs_udp(enum mlx5e_traffic_types i) +static enum 
fs_udp_type tt2fs_udp(enum mlx5_traffic_types i) { switch (i) { - case MLX5E_TT_IPV4_UDP: + case MLX5_TT_IPV4_UDP: return FS_IPV4_UDP; - case MLX5E_TT_IPV6_UDP: + case MLX5_TT_IPV6_UDP: return FS_IPV6_UDP; default: return FS_UDP_NUM_TYPES; @@ -75,7 +75,7 @@ static void fs_udp_set_dport_flow(struct mlx5_flow_spec *spec, enum fs_udp_type struct mlx5_flow_handle * mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, - enum mlx5e_traffic_types ttc_type, + enum mlx5_traffic_types ttc_type, u32 tir_num, u16 d_port) { enum fs_udp_type type = tt2fs_udp(ttc_type); @@ -124,7 +124,7 @@ static int fs_udp_add_default_rule(struct mlx5e_priv *priv, enum fs_udp_type typ fs_udp = priv->fs.udp; fs_udp_t = &fs_udp->tables[type]; - dest = mlx5e_ttc_get_default_dest(priv, fs_udp2tt(type)); + dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_udp2tt(type)); rule = mlx5_add_flow_rules(fs_udp_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -259,7 +259,7 @@ static int fs_udp_disable(struct mlx5e_priv *priv) for (i = 0; i < FS_UDP_NUM_TYPES; i++) { /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5e_ttc_fwd_default_dest(priv, fs_udp2tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_udp2tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -281,7 +281,7 @@ static int fs_udp_enable(struct mlx5e_priv *priv) dest.ft = priv->fs.udp->tables[i].t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5e_ttc_fwd_dest(priv, fs_udp2tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_udp2tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", @@ -401,7 +401,7 @@ static int fs_any_add_default_rule(struct mlx5e_priv *priv) fs_any = priv->fs.any; fs_any_t = &fs_any->table; - dest = mlx5e_ttc_get_default_dest(priv, MLX5E_TT_ANY); + dest = mlx5_ttc_get_default_dest(priv->fs.ttc, MLX5_TT_ANY); rule = mlx5_add_flow_rules(fs_any_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -514,11 +514,11 @@ static int fs_any_disable(struct mlx5e_priv *priv) int err; /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5e_ttc_fwd_default_dest(priv, MLX5E_TT_ANY); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, MLX5_TT_ANY); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", - __func__, MLX5E_TT_ANY, err); + __func__, MLX5_TT_ANY, err); return err; } return 0; @@ -533,11 +533,11 @@ static int fs_any_enable(struct mlx5e_priv *priv) dest.ft = priv->fs.any->table.t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5e_ttc_fwd_dest(priv, MLX5E_TT_ANY, &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, MLX5_TT_ANY, &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", - __func__, MLX5E_TT_ANY, err); + __func__, MLX5_TT_ANY, err); return err; } return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h index 8385df24eb99..7a70c4f38fda 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.h @@ -12,7 +12,7 @@ void mlx5e_fs_tt_redirect_del_rule(struct mlx5_flow_handle *rule); /* UDP traffic type redirect */ struct mlx5_flow_handle * mlx5e_fs_tt_redirect_udp_add_rule(struct mlx5e_priv *priv, - enum mlx5e_traffic_types 
ttc_type, + enum mlx5_traffic_types ttc_type, u32 tir_num, u16 d_port); void mlx5e_fs_tt_redirect_udp_destroy(struct mlx5e_priv *priv); int mlx5e_fs_tt_redirect_udp_create(struct mlx5e_priv *priv); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c index ea321e528749..4e72ca8070e2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.c @@ -5,11 +5,15 @@ #include <linux/slab.h> #include <linux/xarray.h> #include <linux/hashtable.h> +#include <linux/refcount.h> #include "mapping.h" #define MAPPING_GRACE_PERIOD 2000 +static LIST_HEAD(shared_ctx_list); +static DEFINE_MUTEX(shared_ctx_lock); + struct mapping_ctx { struct xarray xarray; DECLARE_HASHTABLE(ht, 8); @@ -20,6 +24,10 @@ struct mapping_ctx { struct delayed_work dwork; struct list_head pending_list; spinlock_t pending_list_lock; /* Guards pending list */ + u64 id; + u8 type; + struct list_head list; + refcount_t refcount; }; struct mapping_item { @@ -205,11 +213,48 @@ mapping_create(size_t data_size, u32 max_id, bool delayed_removal) mutex_init(&ctx->lock); xa_init_flags(&ctx->xarray, XA_FLAGS_ALLOC1); + refcount_set(&ctx->refcount, 1); + INIT_LIST_HEAD(&ctx->list); + + return ctx; +} + +struct mapping_ctx * +mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal) +{ + struct mapping_ctx *ctx; + + mutex_lock(&shared_ctx_lock); + list_for_each_entry(ctx, &shared_ctx_list, list) { + if (ctx->id == id && ctx->type == type) { + if (refcount_inc_not_zero(&ctx->refcount)) + goto unlock; + break; + } + } + + ctx = mapping_create(data_size, max_id, delayed_removal); + if (IS_ERR(ctx)) + goto unlock; + + ctx->id = id; + ctx->type = type; + list_add(&ctx->list, &shared_ctx_list); + +unlock: + mutex_unlock(&shared_ctx_lock); return ctx; } void mapping_destroy(struct mapping_ctx *ctx) { + if (!refcount_dec_and_test(&ctx->refcount)) + return; + + mutex_lock(&shared_ctx_lock); + list_del(&ctx->list); + mutex_unlock(&shared_ctx_lock); + mapping_flush_work(ctx); xa_destroy(&ctx->xarray); mutex_destroy(&ctx->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h index 285525cc5470..4e2119f0f4c1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/mapping.h @@ -24,4 +24,9 @@ struct mapping_ctx *mapping_create(size_t data_size, u32 max_id, bool delayed_removal); void mapping_destroy(struct mapping_ctx *ctx); +/* adds mapping with an id or get an existing mapping with the same id + */ +struct mapping_ctx * +mapping_create_for_id(u64 id, u8 type, size_t data_size, u32 max_id, bool delayed_removal); + #endif /* __MLX5_MAPPING_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c index 2cbf18c967f7..3cbb596821e8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.c @@ -167,6 +167,18 @@ u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, return is_linear_skb ? 
mlx5e_get_linear_rq_headroom(params, xsk) : 0; } +struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params) +{ + struct mlx5e_lro_param lro_param; + + lro_param = (struct mlx5e_lro_param) { + .enabled = params->lro_en, + .timeout = params->lro_timeout, + }; + + return lro_param; +} + u16 mlx5e_calc_sq_stop_room(struct mlx5_core_dev *mdev, struct mlx5e_params *params) { bool is_mpwqe = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_SKB_TX_MPWQE); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h index e9593f5f0661..879ad46d754e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/params.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/params.h @@ -11,6 +11,11 @@ struct mlx5e_xsk_param { u16 chunk_size; }; +struct mlx5e_lro_param { + bool enabled; + u32 timeout; +}; + struct mlx5e_cq_param { u32 cqc[MLX5_ST_SZ_DW(cqc)]; struct mlx5_wq_param wq; @@ -120,6 +125,7 @@ u8 mlx5e_mpwqe_get_log_num_strides(struct mlx5_core_dev *mdev, u16 mlx5e_get_rq_headroom(struct mlx5_core_dev *mdev, struct mlx5e_params *params, struct mlx5e_xsk_param *xsk); +struct mlx5e_lro_param mlx5e_get_lro_param(struct mlx5e_params *params); /* Build queue parameters */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c index efef4adce086..ee688dec67a9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c @@ -326,13 +326,14 @@ static int mlx5e_ptp_open_txqsqs(struct mlx5e_ptp *c, struct mlx5e_ptp_params *cparams) { struct mlx5e_params *params = &cparams->params; + u8 num_tc = mlx5e_get_dcb_num_tc(params); int ix_base; int err; int tc; - ix_base = params->num_tc * params->num_channels; + ix_base = num_tc * params->num_channels; - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { int txq_ix = ix_base + tc; err = mlx5e_ptp_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, @@ -365,9 +366,12 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, struct mlx5e_create_cq_param ccp = {}; struct dim_cq_moder ptp_moder = {}; struct mlx5e_cq_param *cq_param; + u8 num_tc; int err; int tc; + num_tc = mlx5e_get_dcb_num_tc(params); + ccp.node = dev_to_node(mlx5_core_dma_dev(c->mdev)); ccp.ch_stats = c->stats; ccp.napi = &c->napi; @@ -375,7 +379,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, cq_param = &cparams->txq_sq_param.cqp; - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { struct mlx5e_cq *cq = &c->ptpsq[tc].txqsq.cq; err = mlx5e_open_cq(c->priv, ptp_moder, cq_param, &ccp, cq); @@ -383,7 +387,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, goto out_err_txqsq_cq; } - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < num_tc; tc++) { struct mlx5e_cq *cq = &c->ptpsq[tc].ts_cq; struct mlx5e_ptpsq *ptpsq = &c->ptpsq[tc]; @@ -399,7 +403,7 @@ static int mlx5e_ptp_open_tx_cqs(struct mlx5e_ptp *c, out_err_ts_cq: for (--tc; tc >= 0; tc--) mlx5e_close_cq(&c->ptpsq[tc].ts_cq); - tc = params->num_tc; + tc = num_tc; out_err_txqsq_cq: for (--tc; tc >= 0; tc--) mlx5e_close_cq(&c->ptpsq[tc].txqsq.cq); @@ -475,7 +479,7 @@ static void mlx5e_ptp_build_params(struct mlx5e_ptp *c, params->num_channels = orig->num_channels; params->hard_mtu = orig->hard_mtu; params->sw_mtu = orig->sw_mtu; - params->num_tc = orig->num_tc; + params->mqprio = orig->mqprio; /* SQ */ if (test_bit(MLX5E_PTP_STATE_TX, c->state)) { @@ -605,9 +609,9 @@ static void 
mlx5e_ptp_rx_unset_fs(struct mlx5e_priv *priv) static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) { + u32 tirn = mlx5e_rx_res_get_tirn_ptp(priv->rx_res); struct mlx5e_ptp_fs *ptp_fs = priv->fs.ptp_fs; struct mlx5_flow_handle *rule; - u32 tirn = priv->ptp_tir.tirn; int err; if (ptp_fs->valid) @@ -617,7 +621,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) if (err) goto out_free; - rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV4_UDP, + rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV4_UDP, tirn, PTP_EV_PORT); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -625,7 +629,7 @@ static int mlx5e_ptp_rx_set_fs(struct mlx5e_priv *priv) } ptp_fs->udp_v4_rule = rule; - rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5E_TT_IPV6_UDP, + rule = mlx5e_fs_tt_redirect_udp_add_rule(priv, MLX5_TT_IPV6_UDP, tirn, PTP_EV_PORT); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -680,7 +684,7 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); - c->num_tc = params->num_tc; + c->num_tc = mlx5e_get_dcb_num_tc(params); c->stats = &priv->ptp_stats.ch; c->lag_port = lag_port; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 5efe3278b0f6..e8a8d78e3e4d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -132,7 +132,7 @@ static u16 mlx5e_qid_from_qos(struct mlx5e_channels *chs, u16 qid) */ bool is_ptp = MLX5E_GET_PFLAG(&chs->params, MLX5E_PFLAG_TX_PORT_TS); - return (chs->params.num_channels + is_ptp) * chs->params.num_tc + qid; + return (chs->params.num_channels + is_ptp) * mlx5e_get_dcb_num_tc(&chs->params) + qid; } int mlx5e_get_txq_by_classid(struct mlx5e_priv *priv, u16 classid) @@ -733,8 +733,8 @@ static void mlx5e_reset_qdisc(struct net_device *dev, u16 qid) spin_unlock_bh(qdisc_lock(qdisc)); } -int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, - u16 *new_qid, struct netlink_ext_ack *extack) +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid, + struct netlink_ext_ack *extack) { struct mlx5e_qos_node *node; struct netdev_queue *txq; @@ -742,11 +742,9 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, bool opened; int err; - qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", classid); - - *old_qid = *new_qid = 0; + qos_dbg(priv->mdev, "TC_HTB_LEAF_DEL classid %04x\n", *classid); - node = mlx5e_sw_node_find(priv, classid); + node = mlx5e_sw_node_find(priv, *classid); if (!node) return -ENOENT; @@ -764,7 +762,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, err = mlx5_qos_destroy_node(priv->mdev, node->hw_id); if (err) /* Not fatal. 
*/ qos_warn(priv->mdev, "Failed to destroy leaf node %u (class %04x), err = %d\n", - node->hw_id, classid, err); + node->hw_id, *classid, err); mlx5e_sw_node_delete(priv, node); @@ -826,8 +824,7 @@ int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, if (opened) mlx5e_reactivate_qos_sq(priv, moved_qid, txq); - *old_qid = mlx5e_qid_from_qos(&priv->channels, moved_qid); - *new_qid = mlx5e_qid_from_qos(&priv->channels, qid); + *classid = node->classid; return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h index 5af7991fcd19..757682b7c0e0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.h @@ -34,8 +34,8 @@ int mlx5e_htb_leaf_alloc_queue(struct mlx5e_priv *priv, u16 classid, struct netlink_ext_ack *extack); int mlx5e_htb_leaf_to_inner(struct mlx5e_priv *priv, u16 classid, u16 child_classid, u64 rate, u64 ceil, struct netlink_ext_ack *extack); -int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 classid, u16 *old_qid, - u16 *new_qid, struct netlink_ext_ack *extack); +int mlx5e_htb_leaf_del(struct mlx5e_priv *priv, u16 *classid, + struct netlink_ext_ack *extack); int mlx5e_htb_leaf_del_last(struct mlx5e_priv *priv, u16 classid, bool force, struct netlink_ext_ack *extack); int mlx5e_htb_node_modify(struct mlx5e_priv *priv, u16 classid, u64 rate, u64 ceil, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 3c0032c9647c..0c38c2e319be 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -15,9 +15,116 @@ struct mlx5_bridge_switchdev_fdb_work { struct work_struct work; struct switchdev_notifier_fdb_info fdb_info; struct net_device *dev; + struct mlx5_esw_bridge_offloads *br_offloads; bool add; }; +static bool mlx5_esw_bridge_dev_same_esw(struct net_device *dev, struct mlx5_eswitch *esw) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + + return esw == priv->mdev->priv.eswitch; +} + +static bool mlx5_esw_bridge_dev_same_hw(struct net_device *dev, struct mlx5_eswitch *esw) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5_core_dev *mdev, *esw_mdev; + u64 system_guid, esw_system_guid; + + mdev = priv->mdev; + esw_mdev = esw->dev; + + system_guid = mlx5_query_nic_system_image_guid(mdev); + esw_system_guid = mlx5_query_nic_system_image_guid(esw_mdev); + + return system_guid == esw_system_guid; +} + +static struct net_device * +mlx5_esw_bridge_lag_rep_get(struct net_device *dev, struct mlx5_eswitch *esw) +{ + struct net_device *lower; + struct list_head *iter; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + + if (!mlx5e_eswitch_rep(lower)) + continue; + + priv = netdev_priv(lower); + mdev = priv->mdev; + if (mlx5_lag_is_shared_fdb(mdev) && mlx5_esw_bridge_dev_same_esw(lower, esw)) + return lower; + } + + return NULL; +} + +static struct net_device * +mlx5_esw_bridge_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw, + u16 *vport_num, u16 *esw_owner_vhca_id) +{ + struct mlx5e_rep_priv *rpriv; + struct mlx5e_priv *priv; + + if (netif_is_lag_master(dev)) + dev = mlx5_esw_bridge_lag_rep_get(dev, esw); + + if (!dev || !mlx5e_eswitch_rep(dev) || !mlx5_esw_bridge_dev_same_hw(dev, esw)) + return NULL; + + priv = netdev_priv(dev); + rpriv = priv->ppriv; + *vport_num = rpriv->rep->vport; + 
*esw_owner_vhca_id = MLX5_CAP_GEN(priv->mdev, vhca_id); + return dev; +} + +static struct net_device * +mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(struct net_device *dev, struct mlx5_eswitch *esw, + u16 *vport_num, u16 *esw_owner_vhca_id) +{ + struct net_device *lower_dev; + struct list_head *iter; + + if (netif_is_lag_master(dev) || mlx5e_eswitch_rep(dev)) + return mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, vport_num, + esw_owner_vhca_id); + + netdev_for_each_lower_dev(dev, lower_dev, iter) { + struct net_device *rep; + + if (netif_is_bridge_master(lower_dev)) + continue; + + rep = mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(lower_dev, esw, vport_num, + esw_owner_vhca_id); + if (rep) + return rep; + } + + return NULL; +} + +static bool mlx5_esw_bridge_is_local(struct net_device *dev, struct net_device *rep, + struct mlx5_eswitch *esw) +{ + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + + if (!mlx5_esw_bridge_dev_same_esw(rep, esw)) + return false; + + priv = netdev_priv(rep); + mdev = priv->mdev; + if (netif_is_lag_master(dev)) + return mlx5_lag_is_shared_fdb(mdev) && mlx5_lag_is_master(mdev); + return true; +} + static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr) { struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb, @@ -25,37 +132,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr netdev_nb); struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev, *rep; + struct mlx5_eswitch *esw = br_offloads->esw; + u16 vport_num, esw_owner_vhca_id; struct netlink_ext_ack *extack; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct net_device *upper; - struct mlx5e_priv *priv; - u16 vport_num; - - if (!mlx5e_eswitch_rep(dev)) - return 0; + int ifindex = upper->ifindex; + int err; - upper = info->upper_dev; if (!netif_is_bridge_master(upper)) return 0; - esw = br_offloads->esw; - priv = netdev_priv(dev); - if (esw != priv->mdev->priv.eswitch) + rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id); + if (!rep) return 0; - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); - extack = netdev_notifier_info_to_extack(&info->info); - return info->linking ? - mlx5_esw_bridge_vport_link(upper->ifindex, br_offloads, vport, extack) : - mlx5_esw_bridge_vport_unlink(upper->ifindex, br_offloads, vport, extack); + if (mlx5_esw_bridge_is_local(dev, rep, esw)) + err = info->linking ? + mlx5_esw_bridge_vport_link(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack) : + mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack); + else if (mlx5_esw_bridge_dev_same_hw(rep, esw)) + err = info->linking ? 
+ mlx5_esw_bridge_vport_peer_link(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack) : + mlx5_esw_bridge_vport_peer_unlink(ifindex, vport_num, esw_owner_vhca_id, + br_offloads, extack); + + return err; } static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb, @@ -75,31 +181,28 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb, return notifier_from_errno(err); } -static int mlx5_esw_bridge_port_obj_add(struct net_device *dev, - const void *ctx, - const struct switchdev_obj *obj, - struct netlink_ext_ack *extack) +static int +mlx5_esw_bridge_port_obj_add(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + struct mlx5_esw_bridge_offloads *br_offloads) { + struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_obj_info->info); + const struct switchdev_obj *obj = port_obj_info->obj; const struct switchdev_obj_port_vlan *vlan; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; - int err = 0; + u16 vport_num, esw_owner_vhca_id; + int err; - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) + return 0; + + port_obj_info->handled = true; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - err = mlx5_esw_bridge_port_vlan_add(vlan->vid, vlan->flags, esw, vport, extack); + err = mlx5_esw_bridge_port_vlan_add(vport_num, esw_owner_vhca_id, vlan->vid, + vlan->flags, br_offloads, extack); break; default: return -EOPNOTSUPP; @@ -107,29 +210,25 @@ static int mlx5_esw_bridge_port_obj_add(struct net_device *dev, return err; } -static int mlx5_esw_bridge_port_obj_del(struct net_device *dev, - const void *ctx, - const struct switchdev_obj *obj) +static int +mlx5_esw_bridge_port_obj_del(struct net_device *dev, + struct switchdev_notifier_port_obj_info *port_obj_info, + struct mlx5_esw_bridge_offloads *br_offloads) { + const struct switchdev_obj *obj = port_obj_info->obj; const struct switchdev_obj_port_vlan *vlan; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; + u16 vport_num, esw_owner_vhca_id; - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) + return 0; + + port_obj_info->handled = true; switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - mlx5_esw_bridge_port_vlan_del(vlan->vid, esw, vport); + mlx5_esw_bridge_port_vlan_del(vport_num, esw_owner_vhca_id, vlan->vid, br_offloads); break; default: return -EOPNOTSUPP; @@ -137,25 +236,21 @@ static int mlx5_esw_bridge_port_obj_del(struct net_device *dev, return 0; } -static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, - const void *ctx, - const struct switchdev_attr *attr, - struct netlink_ext_ack *extack) +static int +mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, + struct switchdev_notifier_port_attr_info *port_attr_info, + struct mlx5_esw_bridge_offloads 
*br_offloads) { - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; - int err = 0; + struct netlink_ext_ack *extack = switchdev_notifier_info_to_extack(&port_attr_info->info); + const struct switchdev_attr *attr = port_attr_info->attr; + u16 vport_num, esw_owner_vhca_id; + int err; - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); + if (!mlx5_esw_bridge_lower_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) + return 0; + + port_attr_info->handled = true; switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PRE_BRIDGE_FLAGS: @@ -167,10 +262,12 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, case SWITCHDEV_ATTR_ID_PORT_BRIDGE_FLAGS: break; case SWITCHDEV_ATTR_ID_BRIDGE_AGEING_TIME: - err = mlx5_esw_bridge_ageing_time_set(attr->u.ageing_time, esw, vport); + err = mlx5_esw_bridge_ageing_time_set(vport_num, esw_owner_vhca_id, + attr->u.ageing_time, br_offloads); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - err = mlx5_esw_bridge_vlan_filtering_set(attr->u.vlan_filtering, esw, vport); + err = mlx5_esw_bridge_vlan_filtering_set(vport_num, esw_owner_vhca_id, + attr->u.vlan_filtering, br_offloads); break; default: err = -EOPNOTSUPP; @@ -179,27 +276,24 @@ static int mlx5_esw_bridge_port_obj_attr_set(struct net_device *dev, return err; } -static int mlx5_esw_bridge_event_blocking(struct notifier_block *unused, +static int mlx5_esw_bridge_event_blocking(struct notifier_block *nb, unsigned long event, void *ptr) { + struct mlx5_esw_bridge_offloads *br_offloads = container_of(nb, + struct mlx5_esw_bridge_offloads, + nb_blk); struct net_device *dev = switchdev_notifier_info_to_dev(ptr); int err; switch (event) { case SWITCHDEV_PORT_OBJ_ADD: - err = switchdev_handle_port_obj_add(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_add); + err = mlx5_esw_bridge_port_obj_add(dev, ptr, br_offloads); break; case SWITCHDEV_PORT_OBJ_DEL: - err = switchdev_handle_port_obj_del(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_del); + err = mlx5_esw_bridge_port_obj_del(dev, ptr, br_offloads); break; case SWITCHDEV_PORT_ATTR_SET: - err = switchdev_handle_port_attr_set(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_attr_set); + err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads); break; default: err = 0; @@ -222,27 +316,23 @@ static void mlx5_esw_bridge_switchdev_fdb_event_work(struct work_struct *work) container_of(work, struct mlx5_bridge_switchdev_fdb_work, work); struct switchdev_notifier_fdb_info *fdb_info = &fdb_work->fdb_info; + struct mlx5_esw_bridge_offloads *br_offloads = + fdb_work->br_offloads; struct net_device *dev = fdb_work->dev; - struct mlx5e_rep_priv *rpriv; - struct mlx5_eswitch *esw; - struct mlx5_vport *vport; - struct mlx5e_priv *priv; - u16 vport_num; + u16 vport_num, esw_owner_vhca_id; rtnl_lock(); - priv = netdev_priv(dev); - rpriv = priv->ppriv; - vport_num = rpriv->rep->vport; - esw = priv->mdev->priv.eswitch; - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) + if (!mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, br_offloads->esw, &vport_num, + &esw_owner_vhca_id)) goto out; if (fdb_work->add) - mlx5_esw_bridge_fdb_create(dev, esw, vport, fdb_info); + mlx5_esw_bridge_fdb_create(dev, vport_num, esw_owner_vhca_id, br_offloads, + 
fdb_info); else - mlx5_esw_bridge_fdb_remove(dev, esw, vport, fdb_info); + mlx5_esw_bridge_fdb_remove(dev, vport_num, esw_owner_vhca_id, br_offloads, + fdb_info); out: rtnl_unlock(); @@ -251,7 +341,8 @@ out: static struct mlx5_bridge_switchdev_fdb_work * mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add, - struct switchdev_notifier_fdb_info *fdb_info) + struct switchdev_notifier_fdb_info *fdb_info, + struct mlx5_esw_bridge_offloads *br_offloads) { struct mlx5_bridge_switchdev_fdb_work *work; u8 *addr; @@ -273,6 +364,7 @@ mlx5_esw_bridge_init_switchdev_fdb_work(struct net_device *dev, bool add, dev_hold(dev); work->dev = dev; + work->br_offloads = br_offloads; work->add = add; return work; } @@ -286,20 +378,14 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, struct net_device *dev = switchdev_notifier_info_to_dev(ptr); struct switchdev_notifier_fdb_info *fdb_info; struct mlx5_bridge_switchdev_fdb_work *work; + struct mlx5_eswitch *esw = br_offloads->esw; struct switchdev_notifier_info *info = ptr; - struct net_device *upper; - struct mlx5e_priv *priv; - - if (!mlx5e_eswitch_rep(dev)) - return NOTIFY_DONE; - priv = netdev_priv(dev); - if (priv->mdev->priv.eswitch != br_offloads->esw) - return NOTIFY_DONE; + u16 vport_num, esw_owner_vhca_id; + struct net_device *upper, *rep; if (event == SWITCHDEV_PORT_ATTR_SET) { - int err = switchdev_handle_port_attr_set(dev, ptr, - mlx5e_eswitch_rep, - mlx5_esw_bridge_port_obj_attr_set); + int err = mlx5_esw_bridge_port_obj_attr_set(dev, ptr, br_offloads); + return notifier_from_errno(err); } @@ -309,7 +395,27 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, if (!netif_is_bridge_master(upper)) return NOTIFY_DONE; + rep = mlx5_esw_bridge_rep_vport_num_vhca_id_get(dev, esw, &vport_num, &esw_owner_vhca_id); + if (!rep) + return NOTIFY_DONE; + switch (event) { + case SWITCHDEV_FDB_ADD_TO_BRIDGE: + /* only handle the event on native eswtich of representor */ + if (!mlx5_esw_bridge_is_local(dev, rep, esw)) + break; + + fdb_info = container_of(info, + struct switchdev_notifier_fdb_info, + info); + mlx5_esw_bridge_fdb_update_used(dev, vport_num, esw_owner_vhca_id, br_offloads, + fdb_info); + break; + case SWITCHDEV_FDB_DEL_TO_BRIDGE: + /* only handle the event on peers */ + if (mlx5_esw_bridge_is_local(dev, rep, esw)) + break; + fallthrough; case SWITCHDEV_FDB_ADD_TO_DEVICE: case SWITCHDEV_FDB_DEL_TO_DEVICE: fdb_info = container_of(info, @@ -318,7 +424,8 @@ static int mlx5_esw_bridge_switchdev_event(struct notifier_block *nb, work = mlx5_esw_bridge_init_switchdev_fdb_work(dev, event == SWITCHDEV_FDB_ADD_TO_DEVICE, - fdb_info); + fdb_info, + br_offloads); if (IS_ERR(work)) { WARN_ONCE(1, "Failed to init switchdev work, err=%ld", PTR_ERR(work)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c index 059799e4f483..51a4d80f7fa3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c @@ -17,7 +17,7 @@ #include "en/mapping.h" #include "en/tc_tun.h" #include "lib/port_tun.h" -#include "esw/sample.h" +#include "en/tc/sample.h" struct mlx5e_rep_indr_block_priv { struct net_device *netdev; @@ -516,7 +516,6 @@ void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv) mlx5e_rep_indr_block_unbind); } -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, struct mlx5e_tc_update_priv *tc_priv, 
u32 tunnel_id) @@ -609,12 +608,13 @@ static bool mlx5e_restore_tunnel(struct mlx5e_priv *priv, struct sk_buff *skb, return true; } -static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1, - struct mlx5e_tc_update_priv *tc_priv) +static bool mlx5e_restore_skb_chain(struct sk_buff *skb, u32 chain, u32 reg_c1, + struct mlx5e_tc_update_priv *tc_priv) { struct mlx5e_priv *priv = netdev_priv(skb->dev); u32 tunnel_id = (reg_c1 >> ESW_TUN_OFFSET) & TUNNEL_ID_MASK; +#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) if (chain) { struct mlx5_rep_uplink_priv *uplink_priv; struct mlx5e_rep_priv *uplink_rpriv; @@ -636,9 +636,25 @@ static bool mlx5e_restore_skb(struct sk_buff *skb, u32 chain, u32 reg_c1, zone_restore_id)) return false; } +#endif /* CONFIG_NET_TC_SKB_EXT */ + return mlx5e_restore_tunnel(priv, skb, tc_priv, tunnel_id); } -#endif /* CONFIG_NET_TC_SKB_EXT */ + +static void mlx5e_restore_skb_sample(struct mlx5e_priv *priv, struct sk_buff *skb, + struct mlx5_mapped_obj *mapped_obj, + struct mlx5e_tc_update_priv *tc_priv) +{ + if (!mlx5e_restore_tunnel(priv, skb, tc_priv, mapped_obj->sample.tunnel_id)) { + netdev_dbg(priv->netdev, + "Failed to restore tunnel info for sampled packet\n"); + return; + } +#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) + mlx5e_tc_sample_skb(skb, mapped_obj); +#endif /* CONFIG_MLX5_TC_SAMPLE */ + mlx5_rep_tc_post_napi_receive(tc_priv); +} bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct sk_buff *skb, @@ -647,7 +663,7 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, struct mlx5_mapped_obj mapped_obj; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; - u32 reg_c0, reg_c1; + u32 reg_c0; int err; reg_c0 = (be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK); @@ -659,8 +675,6 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, */ skb->mark = 0; - reg_c1 = be32_to_cpu(cqe->ft_metadata); - priv = netdev_priv(skb->dev); esw = priv->mdev->priv.eswitch; err = mapping_find(esw->offloads.reg_c0_obj_pool, reg_c0, &mapped_obj); @@ -671,18 +685,14 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe, return false; } -#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT) - if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) - return mlx5e_restore_skb(skb, mapped_obj.chain, reg_c1, tc_priv); -#endif /* CONFIG_NET_TC_SKB_EXT */ -#if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) { - mlx5_esw_sample_skb(skb, &mapped_obj); + if (mapped_obj.type == MLX5_MAPPED_OBJ_CHAIN) { + u32 reg_c1 = be32_to_cpu(cqe->ft_metadata); + + return mlx5e_restore_skb_chain(skb, mapped_obj.chain, reg_c1, tc_priv); + } else if (mapped_obj.type == MLX5_MAPPED_OBJ_SAMPLE) { + mlx5e_restore_skb_sample(priv, skb, &mapped_obj, tc_priv); return false; - } -#endif /* CONFIG_MLX5_TC_SAMPLE */ - if (mapped_obj.type != MLX5_MAPPED_OBJ_SAMPLE && - mapped_obj.type != MLX5_MAPPED_OBJ_CHAIN) { + } else { netdev_dbg(priv->netdev, "Invalid mapped object type: %d\n", mapped_obj.type); return false; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c index 9d361efd5ff7..bb682fd751c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/reporter_tx.c @@ -372,7 +372,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < 
mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; err = mlx5e_tx_reporter_build_diagnose_output(fmsg, sq, tc); @@ -384,7 +384,7 @@ static int mlx5e_tx_reporter_diagnose(struct devlink_health_reporter *reporter, if (!ptp_ch || !test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) goto close_sqs_nest; - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { err = mlx5e_tx_reporter_build_diagnose_output_ptpsq(fmsg, &ptp_ch->ptpsq[tc], tc); @@ -494,7 +494,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &c->sq[tc]; err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "SQ"); @@ -504,7 +504,7 @@ static int mlx5e_tx_reporter_dump_all_sqs(struct mlx5e_priv *priv, } if (ptp_ch && test_bit(MLX5E_PTP_STATE_TX, ptp_ch->state)) { - for (tc = 0; tc < priv->channels.params.num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(&priv->channels.params); tc++) { struct mlx5e_txqsq *sq = &ptp_ch->ptpsq[tc].txqsq; err = mlx5e_health_queue_dump(priv, fmsg, sq->sqn, "PTP SQ"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c new file mode 100644 index 000000000000..b915fb29dd2c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.c @@ -0,0 +1,170 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. */ + +#include "rqt.h" +#include <linux/mlx5/transobj.h> + +void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, + unsigned int num_channels) +{ + unsigned int i; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + indir->table[i] = i % num_channels; +} + +static int mlx5e_rqt_init(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + u16 max_size, u32 *init_rqns, u16 init_size) +{ + void *rqtc; + int inlen; + int err; + u32 *in; + int i; + + rqt->mdev = mdev; + rqt->size = max_size; + + inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * init_size; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); + + MLX5_SET(rqtc, rqtc, rqt_max_size, rqt->size); + + MLX5_SET(rqtc, rqtc, rqt_actual_size, init_size); + for (i = 0; i < init_size; i++) + MLX5_SET(rqtc, rqtc, rq_num[i], init_rqns[i]); + + err = mlx5_core_create_rqt(rqt->mdev, in, inlen, &rqt->rqtn); + + kvfree(in); + return err; +} + +int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + bool indir_enabled, u32 init_rqn) +{ + u16 max_size = indir_enabled ? MLX5E_INDIR_RQT_SIZE : 1; + + return mlx5e_rqt_init(rqt, mdev, max_size, &init_rqn, 1); +} + +static int mlx5e_bits_invert(unsigned long a, int size) +{ + int inv = 0; + int i; + + for (i = 0; i < size; i++) + inv |= (test_bit(size - i - 1, &a) ? 
1 : 0) << i; + + return inv; +} + +static int mlx5e_calc_indir_rqns(u32 *rss_rqns, u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir) +{ + unsigned int i; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) { + unsigned int ix = i; + + if (hfunc == ETH_RSS_HASH_XOR) + ix = mlx5e_bits_invert(ix, ilog2(MLX5E_INDIR_RQT_SIZE)); + + ix = indir->table[ix]; + + if (WARN_ON(ix >= num_rqns)) + /* Could be a bug in the driver or in the kernel part of + * ethtool: indir table refers to non-existent RQs. + */ + return -EINVAL; + rss_rqns[i] = rqns[ix]; + } + + return 0; +} + +int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir) +{ + u32 *rss_rqns; + int err; + + rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + if (!rss_rqns) + return -ENOMEM; + + err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir); + if (err) + goto out; + + err = mlx5e_rqt_init(rqt, mdev, MLX5E_INDIR_RQT_SIZE, rss_rqns, MLX5E_INDIR_RQT_SIZE); + +out: + kvfree(rss_rqns); + return err; +} + +void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt) +{ + mlx5_core_destroy_rqt(rqt->mdev, rqt->rqtn); +} + +static int mlx5e_rqt_redirect(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int size) +{ + unsigned int i; + void *rqtc; + int inlen; + u32 *in; + int err; + + inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * size; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); + + MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); + MLX5_SET(rqtc, rqtc, rqt_actual_size, size); + for (i = 0; i < size; i++) + MLX5_SET(rqtc, rqtc, rq_num[i], rqns[i]); + + err = mlx5_core_modify_rqt(rqt->mdev, rqt->rqtn, in, inlen); + + kvfree(in); + return err; +} + +int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn) +{ + return mlx5e_rqt_redirect(rqt, &rqn, 1); +} + +int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir) +{ + u32 *rss_rqns; + int err; + + if (WARN_ON(rqt->size != MLX5E_INDIR_RQT_SIZE)) + return -EINVAL; + + rss_rqns = kvmalloc_array(MLX5E_INDIR_RQT_SIZE, sizeof(*rss_rqns), GFP_KERNEL); + if (!rss_rqns) + return -ENOMEM; + + err = mlx5e_calc_indir_rqns(rss_rqns, rqns, num_rqns, hfunc, indir); + if (err) + goto out; + + err = mlx5e_rqt_redirect(rqt, rss_rqns, MLX5E_INDIR_RQT_SIZE); + +out: + kvfree(rss_rqns); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h new file mode 100644 index 000000000000..60c985a12f24 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rqt.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_EN_RQT_H__ +#define __MLX5_EN_RQT_H__ + +#include <linux/kernel.h> + +#define MLX5E_INDIR_RQT_SIZE (1 << 8) + +struct mlx5_core_dev; + +struct mlx5e_rss_params_indir { + u32 table[MLX5E_INDIR_RQT_SIZE]; +}; + +void mlx5e_rss_params_indir_init_uniform(struct mlx5e_rss_params_indir *indir, + unsigned int num_channels); + +struct mlx5e_rqt { + struct mlx5_core_dev *mdev; + u32 rqtn; + u16 size; +}; + +int mlx5e_rqt_init_direct(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + bool indir_enabled, u32 init_rqn); +int mlx5e_rqt_init_indir(struct mlx5e_rqt *rqt, struct mlx5_core_dev *mdev, + u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir); +void mlx5e_rqt_destroy(struct mlx5e_rqt *rqt); + +static inline u32 mlx5e_rqt_get_rqtn(struct mlx5e_rqt *rqt) +{ + return rqt->rqtn; +} + +int mlx5e_rqt_redirect_direct(struct mlx5e_rqt *rqt, u32 rqn); +int mlx5e_rqt_redirect_indir(struct mlx5e_rqt *rqt, u32 *rqns, unsigned int num_rqns, + u8 hfunc, struct mlx5e_rss_params_indir *indir); + +#endif /* __MLX5_EN_RQT_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c new file mode 100644 index 000000000000..625cd49ef96c --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.c @@ -0,0 +1,588 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. + +#include "rss.h" + +#define mlx5e_rss_warn(__dev, format, ...) \ + dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + +static const struct mlx5e_rss_params_traffic_type rss_default_config[MLX5E_NUM_INDIR_TIRS] = { + [MLX5_TT_IPV4_TCP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV6_TCP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV4_UDP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV6_UDP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5_TT_IPV4_IPSEC_AH] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV6_IPSEC_AH] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV4_IPSEC_ESP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV6_IPSEC_ESP] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5_TT_IPV4] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP, + }, + [MLX5_TT_IPV6] = { + .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP, + }, +}; + +struct mlx5e_rss_params_traffic_type +mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt) +{ + return rss_default_config[tt]; +} + +struct mlx5e_rss { + struct mlx5e_rss_params_hash hash; + struct mlx5e_rss_params_indir indir; + u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir *tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5e_tir *inner_tir[MLX5E_NUM_INDIR_TIRS]; + struct 
mlx5e_rqt rqt; + struct mlx5_core_dev *mdev; + u32 drop_rqn; + bool inner_ft_support; + bool enabled; + refcount_t refcnt; +}; + +struct mlx5e_rss *mlx5e_rss_alloc(void) +{ + return kvzalloc(sizeof(struct mlx5e_rss), GFP_KERNEL); +} + +void mlx5e_rss_free(struct mlx5e_rss *rss) +{ + kvfree(rss); +} + +static void mlx5e_rss_params_init(struct mlx5e_rss *rss) +{ + enum mlx5_traffic_types tt; + + rss->hash.hfunc = ETH_RSS_HASH_TOP; + netdev_rss_key_fill(rss->hash.toeplitz_hash_key, + sizeof(rss->hash.toeplitz_hash_key)); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + rss->rx_hash_fields[tt] = + mlx5e_rss_get_default_tt_config(tt).rx_hash_fields; +} + +static struct mlx5e_tir **rss_get_tirp(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + return inner ? &rss->inner_tir[tt] : &rss->tir[tt]; +} + +static struct mlx5e_tir *rss_get_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + return *rss_get_tirp(rss, tt, inner); +} + +static struct mlx5e_rss_params_traffic_type +mlx5e_rss_get_tt_config(struct mlx5e_rss *rss, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss_params_traffic_type rss_tt; + + rss_tt = mlx5e_rss_get_default_tt_config(tt); + rss_tt.rx_hash_fields = rss->rx_hash_fields[tt]; + return rss_tt; +} + +static int mlx5e_rss_create_tir(struct mlx5e_rss *rss, + enum mlx5_traffic_types tt, + const struct mlx5e_lro_param *init_lro_param, + bool inner) +{ + struct mlx5e_rss_params_traffic_type rss_tt; + struct mlx5e_tir_builder *builder; + struct mlx5e_tir **tir_p; + struct mlx5e_tir *tir; + u32 rqtn; + int err; + + if (inner && !rss->inner_ft_support) { + mlx5e_rss_warn(rss->mdev, + "Cannot create inner indirect TIR[%d], RSS inner FT is not supported.\n", + tt); + return -EINVAL; + } + + tir_p = rss_get_tirp(rss, tt, inner); + if (*tir_p) + return -EINVAL; + + tir = kvzalloc(sizeof(*tir), GFP_KERNEL); + if (!tir) + return -ENOMEM; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) { + err = -ENOMEM; + goto free_tir; + } + + rqtn = mlx5e_rqt_get_rqtn(&rss->rqt); + mlx5e_tir_builder_build_rqt(builder, rss->mdev->mlx5e_res.hw_objs.td.tdn, + rqtn, rss->inner_ft_support); + mlx5e_tir_builder_build_lro(builder, init_lro_param); + rss_tt = mlx5e_rss_get_tt_config(rss, tt); + mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); + + err = mlx5e_tir_init(tir, builder, rss->mdev, true); + mlx5e_tir_builder_free(builder); + if (err) { + mlx5e_rss_warn(rss->mdev, "Failed to create %sindirect TIR: err = %d, tt = %d\n", + inner ? 
"inner " : "", err, tt); + goto free_tir; + } + + *tir_p = tir; + return 0; + +free_tir: + kvfree(tir); + return err; +} + +static void mlx5e_rss_destroy_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + struct mlx5e_tir **tir_p; + struct mlx5e_tir *tir; + + tir_p = rss_get_tirp(rss, tt, inner); + if (!*tir_p) + return; + + tir = *tir_p; + mlx5e_tir_destroy(tir); + kvfree(tir); + *tir_p = NULL; +} + +static int mlx5e_rss_create_tirs(struct mlx5e_rss *rss, + const struct mlx5e_lro_param *init_lro_param, + bool inner) +{ + enum mlx5_traffic_types tt, max_tt; + int err; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner); + if (err) + goto err_destroy_tirs; + } + + return 0; + +err_destroy_tirs: + max_tt = tt; + for (tt = 0; tt < max_tt; tt++) + mlx5e_rss_destroy_tir(rss, tt, inner); + return err; +} + +static void mlx5e_rss_destroy_tirs(struct mlx5e_rss *rss, bool inner) +{ + enum mlx5_traffic_types tt; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + mlx5e_rss_destroy_tir(rss, tt, inner); +} + +static int mlx5e_rss_update_tir(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + struct mlx5e_rss_params_traffic_type rss_tt; + struct mlx5e_tir_builder *builder; + struct mlx5e_tir *tir; + int err; + + tir = rss_get_tir(rss, tt, inner); + if (!tir) + return 0; + + builder = mlx5e_tir_builder_alloc(true); + if (!builder) + return -ENOMEM; + + rss_tt = mlx5e_rss_get_tt_config(rss, tt); + + mlx5e_tir_builder_build_rss(builder, &rss->hash, &rss_tt, inner); + err = mlx5e_tir_modify(tir, builder); + + mlx5e_tir_builder_free(builder); + return err; +} + +static int mlx5e_rss_update_tirs(struct mlx5e_rss *rss) +{ + enum mlx5_traffic_types tt; + int err, retval; + + retval = 0; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + err = mlx5e_rss_update_tir(rss, tt, false); + if (err) { + retval = retval ? : err; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash of indirect TIR for traffic type %d: err = %d\n", + tt, err); + } + + if (!rss->inner_ft_support) + continue; + + err = mlx5e_rss_update_tir(rss, tt, true); + if (err) { + retval = retval ? 
: err; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash of inner indirect TIR for traffic type %d: err = %d\n", + tt, err); + } + } + return retval; +} + +int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn) +{ + rss->mdev = mdev; + rss->inner_ft_support = inner_ft_support; + rss->drop_rqn = drop_rqn; + + mlx5e_rss_params_init(rss); + refcount_set(&rss->refcnt, 1); + + return mlx5e_rqt_init_direct(&rss->rqt, mdev, true, drop_rqn); +} + +int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_lro_param *init_lro_param) +{ + int err; + + err = mlx5e_rss_init_no_tirs(rss, mdev, inner_ft_support, drop_rqn); + if (err) + goto err_out; + + err = mlx5e_rss_create_tirs(rss, init_lro_param, false); + if (err) + goto err_destroy_rqt; + + if (inner_ft_support) { + err = mlx5e_rss_create_tirs(rss, init_lro_param, true); + if (err) + goto err_destroy_tirs; + } + + return 0; + +err_destroy_tirs: + mlx5e_rss_destroy_tirs(rss, false); +err_destroy_rqt: + mlx5e_rqt_destroy(&rss->rqt); +err_out: + return err; +} + +int mlx5e_rss_cleanup(struct mlx5e_rss *rss) +{ + if (!refcount_dec_if_one(&rss->refcnt)) + return -EBUSY; + + mlx5e_rss_destroy_tirs(rss, false); + + if (rss->inner_ft_support) + mlx5e_rss_destroy_tirs(rss, true); + + mlx5e_rqt_destroy(&rss->rqt); + + return 0; +} + +void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss) +{ + refcount_inc(&rss->refcnt); +} + +void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss) +{ + refcount_dec(&rss->refcnt); +} + +unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss) +{ + return refcount_read(&rss->refcnt); +} + +u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner) +{ + struct mlx5e_tir *tir; + + WARN_ON(inner && !rss->inner_ft_support); + tir = rss_get_tir(rss, tt, inner); + WARN_ON(!tir); + + return mlx5e_tir_get_tirn(tir); +} + +/* Fill the "tirn" output parameter. + * Create the requested TIR if it's its first usage. 
+ */ +int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, + enum mlx5_traffic_types tt, + const struct mlx5e_lro_param *init_lro_param, + bool inner, u32 *tirn) +{ + struct mlx5e_tir *tir; + + tir = rss_get_tir(rss, tt, inner); + if (!tir) { /* TIR doesn't exist, create one */ + int err; + + err = mlx5e_rss_create_tir(rss, tt, init_lro_param, inner); + if (err) + return err; + tir = rss_get_tir(rss, tt, inner); + } + + *tirn = mlx5e_tir_get_tirn(tir); + return 0; +} + +static void mlx5e_rss_apply(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +{ + int err; + + err = mlx5e_rqt_redirect_indir(&rss->rqt, rqns, num_rqns, rss->hash.hfunc, &rss->indir); + if (err) + mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to channels: err = %d\n", + mlx5e_rqt_get_rqtn(&rss->rqt), err); +} + +void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns) +{ + rss->enabled = true; + mlx5e_rss_apply(rss, rqns, num_rqns); +} + +void mlx5e_rss_disable(struct mlx5e_rss *rss) +{ + int err; + + rss->enabled = false; + err = mlx5e_rqt_redirect_direct(&rss->rqt, rss->drop_rqn); + if (err) + mlx5e_rss_warn(rss->mdev, "Failed to redirect RQT %#x to drop RQ %#x: err = %d\n", + mlx5e_rqt_get_rqtn(&rss->rqt), rss->drop_rqn, err); +} + +int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param) +{ + struct mlx5e_tir_builder *builder; + enum mlx5_traffic_types tt; + int err, final_err; + + builder = mlx5e_tir_builder_alloc(true); + if (!builder) + return -ENOMEM; + + mlx5e_tir_builder_build_lro(builder, lro_param); + + final_err = 0; + + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + struct mlx5e_tir *tir; + + tir = rss_get_tir(rss, tt, false); + if (!tir) + goto inner_tir; + err = mlx5e_tir_modify(tir, builder); + if (err) { + mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of indirect TIR %#x for traffic type %d: err = %d\n", + mlx5e_tir_get_tirn(tir), tt, err); + if (!final_err) + final_err = err; + } + +inner_tir: + if (!rss->inner_ft_support) + continue; + + tir = rss_get_tir(rss, tt, true); + if (!tir) + continue; + err = mlx5e_tir_modify(tir, builder); + if (err) { + mlx5e_rss_warn(rss->mdev, "Failed to update LRO state of inner indirect TIR %#x for traffic type %d: err = %d\n", + mlx5e_tir_get_tirn(tir), tt, err); + if (!final_err) + final_err = err; + } + } + + mlx5e_tir_builder_free(builder); + return final_err; +} + +int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc) +{ + unsigned int i; + + if (indir) + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + indir[i] = rss->indir.table[i]; + + if (key) + memcpy(key, rss->hash.toeplitz_hash_key, + sizeof(rss->hash.toeplitz_hash_key)); + + if (hfunc) + *hfunc = rss->hash.hfunc; + + return 0; +} + +int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, + const u8 *key, const u8 *hfunc, + u32 *rqns, unsigned int num_rqns) +{ + bool changed_indir = false; + bool changed_hash = false; + + if (hfunc && *hfunc != rss->hash.hfunc) { + switch (*hfunc) { + case ETH_RSS_HASH_XOR: + case ETH_RSS_HASH_TOP: + break; + default: + return -EINVAL; + } + changed_hash = true; + changed_indir = true; + rss->hash.hfunc = *hfunc; + } + + if (key) { + if (rss->hash.hfunc == ETH_RSS_HASH_TOP) + changed_hash = true; + memcpy(rss->hash.toeplitz_hash_key, key, + sizeof(rss->hash.toeplitz_hash_key)); + } + + if (indir) { + unsigned int i; + + changed_indir = true; + + for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) + rss->indir.table[i] = indir[i]; + } + + if (changed_indir && rss->enabled) + 
mlx5e_rss_apply(rss, rqns, num_rqns); + + if (changed_hash) + mlx5e_rss_update_tirs(rss); + + return 0; +} + +struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss) +{ + return rss->hash; +} + +u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt) +{ + return rss->rx_hash_fields[tt]; +} + +int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + u8 rx_hash_fields) +{ + u8 old_rx_hash_fields; + int err; + + old_rx_hash_fields = rss->rx_hash_fields[tt]; + + if (old_rx_hash_fields == rx_hash_fields) + return 0; + + rss->rx_hash_fields[tt] = rx_hash_fields; + + err = mlx5e_rss_update_tir(rss, tt, false); + if (err) { + rss->rx_hash_fields[tt] = old_rx_hash_fields; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash fields of indirect TIR for traffic type %d: err = %d\n", + tt, err); + return err; + } + + if (!(rss->inner_ft_support)) + return 0; + + err = mlx5e_rss_update_tir(rss, tt, true); + if (err) { + /* Partial update happened. Try to revert - it may fail too, but + * there is nothing more we can do. + */ + rss->rx_hash_fields[tt] = old_rx_hash_fields; + mlx5e_rss_warn(rss->mdev, + "Failed to update RSS hash fields of inner indirect TIR for traffic type %d: err = %d\n", + tt, err); + if (mlx5e_rss_update_tir(rss, tt, false)) + mlx5e_rss_warn(rss->mdev, + "Partial update of RSS hash fields happened: failed to revert indirect TIR for traffic type %d to the old values\n", + tt); + } + + return err; +} + +void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch) +{ + mlx5e_rss_params_indir_init_uniform(&rss->indir, nch); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h new file mode 100644 index 000000000000..d522a10dadf3 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rss.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. 
*/ + +#ifndef __MLX5_EN_RSS_H__ +#define __MLX5_EN_RSS_H__ + +#include "rqt.h" +#include "tir.h" +#include "fs.h" + +struct mlx5e_rss_params_traffic_type +mlx5e_rss_get_default_tt_config(enum mlx5_traffic_types tt); + +struct mlx5e_rss; + +struct mlx5e_rss *mlx5e_rss_alloc(void); +void mlx5e_rss_free(struct mlx5e_rss *rss); +int mlx5e_rss_init(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn, + const struct mlx5e_lro_param *init_lro_param); +int mlx5e_rss_init_no_tirs(struct mlx5e_rss *rss, struct mlx5_core_dev *mdev, + bool inner_ft_support, u32 drop_rqn); +int mlx5e_rss_cleanup(struct mlx5e_rss *rss); + +void mlx5e_rss_refcnt_inc(struct mlx5e_rss *rss); +void mlx5e_rss_refcnt_dec(struct mlx5e_rss *rss); +unsigned int mlx5e_rss_refcnt_read(struct mlx5e_rss *rss); + +u32 mlx5e_rss_get_tirn(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + bool inner); +int mlx5e_rss_obtain_tirn(struct mlx5e_rss *rss, + enum mlx5_traffic_types tt, + const struct mlx5e_lro_param *init_lro_param, + bool inner, u32 *tirn); + +void mlx5e_rss_enable(struct mlx5e_rss *rss, u32 *rqns, unsigned int num_rqns); +void mlx5e_rss_disable(struct mlx5e_rss *rss); + +int mlx5e_rss_lro_set_param(struct mlx5e_rss *rss, struct mlx5e_lro_param *lro_param); +int mlx5e_rss_get_rxfh(struct mlx5e_rss *rss, u32 *indir, u8 *key, u8 *hfunc); +int mlx5e_rss_set_rxfh(struct mlx5e_rss *rss, const u32 *indir, + const u8 *key, const u8 *hfunc, + u32 *rqns, unsigned int num_rqns); +struct mlx5e_rss_params_hash mlx5e_rss_get_hash(struct mlx5e_rss *rss); +u8 mlx5e_rss_get_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt); +int mlx5e_rss_set_hash_fields(struct mlx5e_rss *rss, enum mlx5_traffic_types tt, + u8 rx_hash_fields); +void mlx5e_rss_set_indir_uniform(struct mlx5e_rss *rss, unsigned int nch); +#endif /* __MLX5_EN_RSS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c new file mode 100644 index 000000000000..bf0313e2682b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.c @@ -0,0 +1,690 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#include "rx_res.h" +#include "channels.h" +#include "params.h" + +#define MLX5E_MAX_NUM_RSS 16 + +struct mlx5e_rx_res { + struct mlx5_core_dev *mdev; + enum mlx5e_rx_res_features features; + unsigned int max_nch; + u32 drop_rqn; + + struct mlx5e_rss *rss[MLX5E_MAX_NUM_RSS]; + bool rss_active; + u32 rss_rqns[MLX5E_INDIR_RQT_SIZE]; + unsigned int rss_nch; + + struct { + struct mlx5e_rqt direct_rqt; + struct mlx5e_tir direct_tir; + struct mlx5e_rqt xsk_rqt; + struct mlx5e_tir xsk_tir; + } *channels; + + struct { + struct mlx5e_rqt rqt; + struct mlx5e_tir tir; + } ptp; +}; + +/* API for rx_res_rss_* */ + +static int mlx5e_rx_res_rss_init_def(struct mlx5e_rx_res *res, + const struct mlx5e_lro_param *init_lro_param, + unsigned int init_nch) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_rss *rss; + int err; + + if (WARN_ON(res->rss[0])) + return -EINVAL; + + rss = mlx5e_rss_alloc(); + if (!rss) + return -ENOMEM; + + err = mlx5e_rss_init(rss, res->mdev, inner_ft_support, res->drop_rqn, + init_lro_param); + if (err) + goto err_rss_free; + + mlx5e_rss_set_indir_uniform(rss, init_nch); + + res->rss[0] = rss; + + return 0; + +err_rss_free: + mlx5e_rss_free(rss); + return err; +} + +int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_rss *rss; + int err, i; + + for (i = 1; i < MLX5E_MAX_NUM_RSS; i++) + if (!res->rss[i]) + break; + + if (i == MLX5E_MAX_NUM_RSS) + return -ENOSPC; + + rss = mlx5e_rss_alloc(); + if (!rss) + return -ENOMEM; + + err = mlx5e_rss_init_no_tirs(rss, res->mdev, inner_ft_support, res->drop_rqn); + if (err) + goto err_rss_free; + + mlx5e_rss_set_indir_uniform(rss, init_nch); + if (res->rss_active) + mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch); + + res->rss[i] = rss; + *rss_idx = i; + + return 0; + +err_rss_free: + mlx5e_rss_free(rss); + return err; +} + +static int __mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) +{ + struct mlx5e_rss *rss = res->rss[rss_idx]; + int err; + + err = mlx5e_rss_cleanup(rss); + if (err) + return err; + + mlx5e_rss_free(rss); + res->rss[rss_idx] = NULL; + + return 0; +} + +int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx) +{ + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -EINVAL; + + return __mlx5e_rx_res_rss_destroy(res, rss_idx); +} + +static void mlx5e_rx_res_rss_destroy_all(struct mlx5e_rx_res *res) +{ + int i; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + struct mlx5e_rss *rss = res->rss[i]; + int err; + + if (!rss) + continue; + + err = __mlx5e_rx_res_rss_destroy(res, i); + if (err) { + unsigned int refcount; + + refcount = mlx5e_rss_refcnt_read(rss); + mlx5_core_warn(res->mdev, + "Failed to destroy RSS context %d, refcount = %u, err = %d\n", + i, refcount, err); + } + } +} + +static void mlx5e_rx_res_rss_enable(struct mlx5e_rx_res *res) +{ + int i; + + res->rss_active = true; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + struct mlx5e_rss *rss = res->rss[i]; + + if (!rss) + continue; + mlx5e_rss_enable(rss, res->rss_rqns, res->rss_nch); + } +} + +static void mlx5e_rx_res_rss_disable(struct mlx5e_rx_res *res) +{ + int i; + + res->rss_active = false; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) { + struct mlx5e_rss *rss = res->rss[i]; + + if (!rss) + continue; + mlx5e_rss_disable(rss); + } +} + +/* Updates the indirection table SW shadow, does not 
update the HW resources yet */ +void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch) +{ + WARN_ON_ONCE(res->rss_active); + mlx5e_rss_set_indir_uniform(res->rss[0], nch); +} + +int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + u32 *indir, u8 *key, u8 *hfunc) +{ + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -ENOENT; + + return mlx5e_rss_get_rxfh(rss, indir, key, hfunc); +} + +int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + const u32 *indir, const u8 *key, const u8 *hfunc) +{ + struct mlx5e_rss *rss; + + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return -EINVAL; + + rss = res->rss[rss_idx]; + if (!rss) + return -ENOENT; + + return mlx5e_rss_set_rxfh(rss, indir, key, hfunc, res->rss_rqns, res->rss_nch); +} + +u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_get_hash_fields(rss, tt); +} + +int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, + u8 rx_hash_fields) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_set_hash_fields(rss, tt, rx_hash_fields); +} + +int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res) +{ + int i, cnt; + + cnt = 0; + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) + if (res->rss[i]) + cnt++; + + return cnt; +} + +int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss) +{ + int i; + + if (!rss) + return -EINVAL; + + for (i = 0; i < MLX5E_MAX_NUM_RSS; i++) + if (rss == res->rss[i]) + return i; + + return -ENOENT; +} + +struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx) +{ + if (rss_idx >= MLX5E_MAX_NUM_RSS) + return NULL; + + return res->rss[rss_idx]; +} + +/* End of API rx_res_rss_* */ + +struct mlx5e_rx_res *mlx5e_rx_res_alloc(void) +{ + return kvzalloc(sizeof(struct mlx5e_rx_res), GFP_KERNEL); +} + +static int mlx5e_rx_res_channels_init(struct mlx5e_rx_res *res, + const struct mlx5e_lro_param *init_lro_param) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + int err = 0; + int ix; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + res->channels = kvcalloc(res->max_nch, sizeof(*res->channels), GFP_KERNEL); + if (!res->channels) { + err = -ENOMEM; + goto out; + } + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_rqt_init_direct(&res->channels[ix].direct_rqt, + res->mdev, false, res->drop_rqn); + if (err) { + mlx5_core_warn(res->mdev, "Failed to create a direct RQT: err = %d, ix = %u\n", + err, ix); + goto err_destroy_direct_rqts; + } + } + + for (ix = 0; ix < res->max_nch; ix++) { + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, + mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + inner_ft_support); + mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_direct(builder); + + err = mlx5e_tir_init(&res->channels[ix].direct_tir, builder, res->mdev, true); + if (err) { + mlx5_core_warn(res->mdev, "Failed to create a direct TIR: err = %d, ix = %u\n", + err, ix); + goto err_destroy_direct_tirs; + } + + mlx5e_tir_builder_clear(builder); + } + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + goto out; + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_rqt_init_direct(&res->channels[ix].xsk_rqt, + res->mdev, false, res->drop_rqn); + if (err) { + mlx5_core_warn(res->mdev, 
"Failed to create an XSK RQT: err = %d, ix = %u\n", + err, ix); + goto err_destroy_xsk_rqts; + } + } + + for (ix = 0; ix < res->max_nch; ix++) { + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + inner_ft_support); + mlx5e_tir_builder_build_lro(builder, init_lro_param); + mlx5e_tir_builder_build_direct(builder); + + err = mlx5e_tir_init(&res->channels[ix].xsk_tir, builder, res->mdev, true); + if (err) { + mlx5_core_warn(res->mdev, "Failed to create an XSK TIR: err = %d, ix = %u\n", + err, ix); + goto err_destroy_xsk_tirs; + } + + mlx5e_tir_builder_clear(builder); + } + + goto out; + +err_destroy_xsk_tirs: + while (--ix >= 0) + mlx5e_tir_destroy(&res->channels[ix].xsk_tir); + + ix = res->max_nch; +err_destroy_xsk_rqts: + while (--ix >= 0) + mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt); + + ix = res->max_nch; +err_destroy_direct_tirs: + while (--ix >= 0) + mlx5e_tir_destroy(&res->channels[ix].direct_tir); + + ix = res->max_nch; +err_destroy_direct_rqts: + while (--ix >= 0) + mlx5e_rqt_destroy(&res->channels[ix].direct_rqt); + + kvfree(res->channels); + +out: + mlx5e_tir_builder_free(builder); + + return err; +} + +static int mlx5e_rx_res_ptp_init(struct mlx5e_rx_res *res) +{ + bool inner_ft_support = res->features & MLX5E_RX_RES_FEATURE_INNER_FT; + struct mlx5e_tir_builder *builder; + int err; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + err = mlx5e_rqt_init_direct(&res->ptp.rqt, res->mdev, false, res->drop_rqn); + if (err) + goto out; + + mlx5e_tir_builder_build_rqt(builder, res->mdev->mlx5e_res.hw_objs.td.tdn, + mlx5e_rqt_get_rqtn(&res->ptp.rqt), + inner_ft_support); + mlx5e_tir_builder_build_direct(builder); + + err = mlx5e_tir_init(&res->ptp.tir, builder, res->mdev, true); + if (err) + goto err_destroy_ptp_rqt; + + goto out; + +err_destroy_ptp_rqt: + mlx5e_rqt_destroy(&res->ptp.rqt); + +out: + mlx5e_tir_builder_free(builder); + return err; +} + +static void mlx5e_rx_res_channels_destroy(struct mlx5e_rx_res *res) +{ + unsigned int ix; + + for (ix = 0; ix < res->max_nch; ix++) { + mlx5e_tir_destroy(&res->channels[ix].direct_tir); + mlx5e_rqt_destroy(&res->channels[ix].direct_rqt); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + mlx5e_tir_destroy(&res->channels[ix].xsk_tir); + mlx5e_rqt_destroy(&res->channels[ix].xsk_rqt); + } + + kvfree(res->channels); +} + +static void mlx5e_rx_res_ptp_destroy(struct mlx5e_rx_res *res) +{ + mlx5e_tir_destroy(&res->ptp.tir); + mlx5e_rqt_destroy(&res->ptp.rqt); +} + +int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, + enum mlx5e_rx_res_features features, unsigned int max_nch, + u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param, + unsigned int init_nch) +{ + int err; + + res->mdev = mdev; + res->features = features; + res->max_nch = max_nch; + res->drop_rqn = drop_rqn; + + err = mlx5e_rx_res_rss_init_def(res, init_lro_param, init_nch); + if (err) + goto err_out; + + err = mlx5e_rx_res_channels_init(res, init_lro_param); + if (err) + goto err_rss_destroy; + + err = mlx5e_rx_res_ptp_init(res); + if (err) + goto err_channels_destroy; + + return 0; + +err_channels_destroy: + mlx5e_rx_res_channels_destroy(res); +err_rss_destroy: + __mlx5e_rx_res_rss_destroy(res, 0); +err_out: + return err; +} + +void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res) +{ + mlx5e_rx_res_ptp_destroy(res); + mlx5e_rx_res_channels_destroy(res); + mlx5e_rx_res_rss_destroy_all(res); +} + +void mlx5e_rx_res_free(struct 
mlx5e_rx_res *res) +{ + kvfree(res); +} + +u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix) +{ + return mlx5e_tir_get_tirn(&res->channels[ix].direct_tir); +} + +u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix) +{ + WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_XSK)); + + return mlx5e_tir_get_tirn(&res->channels[ix].xsk_tir); +} + +u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_get_tirn(rss, tt, false); +} + +u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt) +{ + struct mlx5e_rss *rss = res->rss[0]; + + return mlx5e_rss_get_tirn(rss, tt, true); +} + +u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res) +{ + WARN_ON(!(res->features & MLX5E_RX_RES_FEATURE_PTP)); + return mlx5e_tir_get_tirn(&res->ptp.tir); +} + +u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix) +{ + return mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt); +} + +void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs) +{ + unsigned int nch, ix; + int err; + + nch = mlx5e_channels_get_num(chs); + + for (ix = 0; ix < chs->num; ix++) + mlx5e_channels_get_regular_rqn(chs, ix, &res->rss_rqns[ix]); + res->rss_nch = chs->num; + + mlx5e_rx_res_rss_enable(res); + + for (ix = 0; ix < nch; ix++) { + u32 rqn; + + mlx5e_channels_get_regular_rqn(chs, ix, &rqn); + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + rqn, ix, err); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn)) + rqn = res->drop_rqn; + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + rqn, ix, err); + } + for (ix = nch; ix < res->max_nch; ix++) { + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + res->drop_rqn, ix, err); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + res->drop_rqn, ix, err); + } + + if (res->features & MLX5E_RX_RES_FEATURE_PTP) { + u32 rqn; + + if (mlx5e_channels_get_ptp_rqn(chs, &rqn)) + rqn = res->drop_rqn; + + err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to RQ %#x (PTP): err = %d\n", + mlx5e_rqt_get_rqtn(&res->ptp.rqt), + rqn, err); + } +} + +void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res) +{ + unsigned int ix; + int err; + + mlx5e_rx_res_rss_disable(res); + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_rqt_redirect_direct(&res->channels[ix].direct_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (channel %u): err = %d\n", + 
mlx5e_rqt_get_rqtn(&res->channels[ix].direct_rqt), + res->drop_rqn, ix, err); + + if (!(res->features & MLX5E_RX_RES_FEATURE_XSK)) + continue; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + res->drop_rqn, ix, err); + } + + if (res->features & MLX5E_RX_RES_FEATURE_PTP) { + err = mlx5e_rqt_redirect_direct(&res->ptp.rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect direct RQT %#x to drop RQ %#x (PTP): err = %d\n", + mlx5e_rqt_get_rqtn(&res->ptp.rqt), + res->drop_rqn, err); + } +} + +int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, + unsigned int ix) +{ + u32 rqn; + int err; + + if (!mlx5e_channels_get_xsk_rqn(chs, ix, &rqn)) + return -EINVAL; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to XSK RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + rqn, ix, err); + return err; +} + +int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix) +{ + int err; + + err = mlx5e_rqt_redirect_direct(&res->channels[ix].xsk_rqt, res->drop_rqn); + if (err) + mlx5_core_warn(res->mdev, "Failed to redirect XSK RQT %#x to drop RQ %#x (channel %u): err = %d\n", + mlx5e_rqt_get_rqtn(&res->channels[ix].xsk_rqt), + res->drop_rqn, ix, err); + return err; +} + +int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param) +{ + struct mlx5e_tir_builder *builder; + int err, final_err; + unsigned int ix; + + builder = mlx5e_tir_builder_alloc(true); + if (!builder) + return -ENOMEM; + + mlx5e_tir_builder_build_lro(builder, lro_param); + + final_err = 0; + + for (ix = 0; ix < MLX5E_MAX_NUM_RSS; ix++) { + struct mlx5e_rss *rss = res->rss[ix]; + + if (!rss) + continue; + + err = mlx5e_rss_lro_set_param(rss, lro_param); + if (err) + final_err = final_err ? : err; + } + + for (ix = 0; ix < res->max_nch; ix++) { + err = mlx5e_tir_modify(&res->channels[ix].direct_tir, builder); + if (err) { + mlx5_core_warn(res->mdev, "Failed to update LRO state of direct TIR %#x for channel %u: err = %d\n", + mlx5e_tir_get_tirn(&res->channels[ix].direct_tir), ix, err); + if (!final_err) + final_err = err; + } + } + + mlx5e_tir_builder_free(builder); + return final_err; +} + +struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res) +{ + return mlx5e_rss_get_hash(res->rss[0]); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h new file mode 100644 index 000000000000..4a15942d79f7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rx_res.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_EN_RX_RES_H__ +#define __MLX5_EN_RX_RES_H__ + +#include <linux/kernel.h> +#include "rqt.h" +#include "tir.h" +#include "fs.h" +#include "rss.h" + +struct mlx5e_rx_res; + +struct mlx5e_channels; +struct mlx5e_rss_params_hash; + +enum mlx5e_rx_res_features { + MLX5E_RX_RES_FEATURE_INNER_FT = BIT(0), + MLX5E_RX_RES_FEATURE_XSK = BIT(1), + MLX5E_RX_RES_FEATURE_PTP = BIT(2), +}; + +/* Setup */ +struct mlx5e_rx_res *mlx5e_rx_res_alloc(void); +int mlx5e_rx_res_init(struct mlx5e_rx_res *res, struct mlx5_core_dev *mdev, + enum mlx5e_rx_res_features features, unsigned int max_nch, + u32 drop_rqn, const struct mlx5e_lro_param *init_lro_param, + unsigned int init_nch); +void mlx5e_rx_res_destroy(struct mlx5e_rx_res *res); +void mlx5e_rx_res_free(struct mlx5e_rx_res *res); + +/* TIRN getters for flow steering */ +u32 mlx5e_rx_res_get_tirn_direct(struct mlx5e_rx_res *res, unsigned int ix); +u32 mlx5e_rx_res_get_tirn_xsk(struct mlx5e_rx_res *res, unsigned int ix); +u32 mlx5e_rx_res_get_tirn_rss(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); +u32 mlx5e_rx_res_get_tirn_rss_inner(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); +u32 mlx5e_rx_res_get_tirn_ptp(struct mlx5e_rx_res *res); + +/* RQTN getters for modules that create their own TIRs */ +u32 mlx5e_rx_res_get_rqtn_direct(struct mlx5e_rx_res *res, unsigned int ix); + +/* Activate/deactivate API */ +void mlx5e_rx_res_channels_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs); +void mlx5e_rx_res_channels_deactivate(struct mlx5e_rx_res *res); +int mlx5e_rx_res_xsk_activate(struct mlx5e_rx_res *res, struct mlx5e_channels *chs, + unsigned int ix); +int mlx5e_rx_res_xsk_deactivate(struct mlx5e_rx_res *res, unsigned int ix); + +/* Configuration API */ +void mlx5e_rx_res_rss_set_indir_uniform(struct mlx5e_rx_res *res, unsigned int nch); +int mlx5e_rx_res_rss_get_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + u32 *indir, u8 *key, u8 *hfunc); +int mlx5e_rx_res_rss_set_rxfh(struct mlx5e_rx_res *res, u32 rss_idx, + const u32 *indir, const u8 *key, const u8 *hfunc); + +u8 mlx5e_rx_res_rss_get_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt); +int mlx5e_rx_res_rss_set_hash_fields(struct mlx5e_rx_res *res, enum mlx5_traffic_types tt, + u8 rx_hash_fields); +int mlx5e_rx_res_lro_set_param(struct mlx5e_rx_res *res, struct mlx5e_lro_param *lro_param); + +int mlx5e_rx_res_rss_init(struct mlx5e_rx_res *res, u32 *rss_idx, unsigned int init_nch); +int mlx5e_rx_res_rss_destroy(struct mlx5e_rx_res *res, u32 rss_idx); +int mlx5e_rx_res_rss_cnt(struct mlx5e_rx_res *res); +int mlx5e_rx_res_rss_index(struct mlx5e_rx_res *res, struct mlx5e_rss *rss); +struct mlx5e_rss *mlx5e_rx_res_rss_get(struct mlx5e_rx_res *res, u32 rss_idx); + +/* Workaround for hairpin */ +struct mlx5e_rss_params_hash mlx5e_rx_res_get_current_hash(struct mlx5e_rx_res *res); + +#endif /* __MLX5_EN_RX_RES_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c new file mode 100644 index 000000000000..a3e43e898a56 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.c @@ -0,0 +1,164 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
+ +#include "en_tc.h" +#include "post_act.h" +#include "mlx5_core.h" + +struct mlx5e_post_act { + enum mlx5_flow_namespace_type ns_type; + struct mlx5_fs_chains *chains; + struct mlx5_flow_table *ft; + struct mlx5e_priv *priv; + struct xarray ids; +}; + +struct mlx5e_post_act_handle { + enum mlx5_flow_namespace_type ns_type; + struct mlx5_flow_attr *attr; + struct mlx5_flow_handle *rule; + u32 id; +}; + +#define MLX5_POST_ACTION_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen) +#define MLX5_POST_ACTION_MAX GENMASK(MLX5_POST_ACTION_BITS - 1, 0) +#define MLX5_POST_ACTION_MASK MLX5_POST_ACTION_MAX + +struct mlx5e_post_act * +mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, + enum mlx5_flow_namespace_type ns_type) +{ + struct mlx5e_post_act *post_act; + int err; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB && + !MLX5_CAP_ESW_FLOWTABLE_FDB(priv->mdev, ignore_flow_level)) { + mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + err = -EOPNOTSUPP; + goto err_check; + } else if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { + mlx5_core_warn(priv->mdev, "firmware level support is missing\n"); + err = -EOPNOTSUPP; + goto err_check; + } + + post_act = kzalloc(sizeof(*post_act), GFP_KERNEL); + if (!post_act) { + err = -ENOMEM; + goto err_check; + } + post_act->ft = mlx5_chains_create_global_table(chains); + if (IS_ERR(post_act->ft)) { + err = PTR_ERR(post_act->ft); + mlx5_core_warn(priv->mdev, "failed to create post action table, err: %d\n", err); + goto err_ft; + } + post_act->chains = chains; + post_act->ns_type = ns_type; + post_act->priv = priv; + xa_init_flags(&post_act->ids, XA_FLAGS_ALLOC1); + return post_act; + +err_ft: + kfree(post_act); +err_check: + return ERR_PTR(err); +} + +void +mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act) +{ + if (IS_ERR_OR_NULL(post_act)) + return; + + xa_destroy(&post_act->ids); + mlx5_chains_destroy_global_table(post_act->chains, post_act->ft); + kfree(post_act); +} + +struct mlx5e_post_act_handle * +mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr) +{ + u32 attr_sz = ns_to_attr_sz(post_act->ns_type); + struct mlx5e_post_act_handle *handle = NULL; + struct mlx5_flow_attr *post_attr = NULL; + struct mlx5_flow_spec *spec = NULL; + int err; + + handle = kzalloc(sizeof(*handle), GFP_KERNEL); + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + post_attr = mlx5_alloc_flow_attr(post_act->ns_type); + if (!handle || !spec || !post_attr) { + kfree(post_attr); + kvfree(spec); + kfree(handle); + return ERR_PTR(-ENOMEM); + } + + memcpy(post_attr, attr, attr_sz); + post_attr->chain = 0; + post_attr->prio = 0; + post_attr->ft = post_act->ft; + post_attr->inner_match_level = MLX5_MATCH_NONE; + post_attr->outer_match_level = MLX5_MATCH_NONE; + post_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); + + handle->ns_type = post_act->ns_type; + /* Splits were handled before post action */ + if (handle->ns_type == MLX5_FLOW_NAMESPACE_FDB) + post_attr->esw_attr->split_count = 0; + + err = xa_alloc(&post_act->ids, &handle->id, post_attr, + XA_LIMIT(1, MLX5_POST_ACTION_MAX), GFP_KERNEL); + if (err) + goto err_xarray; + + /* Post action rule matches on fte_id and executes original rule's + * tc rule action + */ + mlx5e_tc_match_to_reg_match(spec, FTEID_TO_REG, + handle->id, MLX5_POST_ACTION_MASK); + + handle->rule = mlx5_tc_rule_insert(post_act->priv, spec, post_attr); + if (IS_ERR(handle->rule)) { + err = PTR_ERR(handle->rule); + netdev_warn(post_act->priv->netdev, "Failed to add 
post action rule"); + goto err_rule; + } + handle->attr = post_attr; + + kvfree(spec); + return handle; + +err_rule: + xa_erase(&post_act->ids, handle->id); +err_xarray: + kfree(post_attr); + kvfree(spec); + kfree(handle); + return ERR_PTR(err); +} + +void +mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle) +{ + mlx5_tc_rule_delete(post_act->priv, handle->rule, handle->attr); + xa_erase(&post_act->ids, handle->id); + kfree(handle->attr); + kfree(handle); +} + +struct mlx5_flow_table * +mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act) +{ + return post_act->ft; +} + +/* Allocate a header modify action to write the post action handle fte id to a register. */ +int +mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev, + struct mlx5e_post_act_handle *handle, + struct mlx5e_tc_mod_hdr_acts *acts) +{ + return mlx5e_tc_match_to_reg_set(dev, acts, handle->ns_type, FTEID_TO_REG, handle->id); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h new file mode 100644 index 000000000000..b530ec1981a5 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/post_act.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#ifndef __MLX5_POST_ACTION_H__ +#define __MLX5_POST_ACTION_H__ + +#include "en.h" +#include "lib/fs_chains.h" + +struct mlx5_flow_attr; +struct mlx5e_priv; +struct mlx5e_tc_mod_hdr_acts; + +struct mlx5e_post_act * +mlx5e_tc_post_act_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, + enum mlx5_flow_namespace_type ns_type); + +void +mlx5e_tc_post_act_destroy(struct mlx5e_post_act *post_act); + +struct mlx5e_post_act_handle * +mlx5e_tc_post_act_add(struct mlx5e_post_act *post_act, struct mlx5_flow_attr *attr); + +void +mlx5e_tc_post_act_del(struct mlx5e_post_act *post_act, struct mlx5e_post_act_handle *handle); + +struct mlx5_flow_table * +mlx5e_tc_post_act_get_ft(struct mlx5e_post_act *post_act); + +int +mlx5e_tc_post_act_set_handle(struct mlx5_core_dev *dev, + struct mlx5e_post_act_handle *handle, + struct mlx5e_tc_mod_hdr_acts *acts); + +#endif /* __MLX5_POST_ACTION_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c index d3ad78aa9d45..6552ecee3f9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.c @@ -4,7 +4,8 @@ #include <linux/skbuff.h> #include <net/psample.h> #include "en/mapping.h" -#include "esw/sample.h" +#include "en/tc/post_act.h" +#include "sample.h" #include "eswitch.h" #include "en_tc.h" #include "fs_core.h" @@ -17,17 +18,18 @@ static const struct esw_vport_tbl_namespace mlx5_esw_vport_tbl_sample_ns = { .flags = MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT | MLX5_FLOW_TABLE_TUNNEL_EN_DECAP, }; -struct mlx5_esw_psample { - struct mlx5e_priv *priv; +struct mlx5e_tc_psample { + struct mlx5_eswitch *esw; struct mlx5_flow_table *termtbl; struct mlx5_flow_handle *termtbl_rule; DECLARE_HASHTABLE(hashtbl, 8); struct mutex ht_lock; /* protect hashtbl */ DECLARE_HASHTABLE(restore_hashtbl, 8); struct mutex restore_lock; /* protect restore_hashtbl */ + struct mlx5e_post_act *post_act; }; -struct mlx5_sampler { +struct mlx5e_sampler { struct hlist_node hlist; u32 sampler_id; u32 sample_ratio; @@ -36,29 +38,32 @@ struct mlx5_sampler { int count; }; -struct mlx5_sample_flow { - struct 
mlx5_sampler *sampler; - struct mlx5_sample_restore *restore; +struct mlx5e_sample_flow { + struct mlx5e_sampler *sampler; + struct mlx5e_sample_restore *restore; struct mlx5_flow_attr *pre_attr; struct mlx5_flow_handle *pre_rule; - struct mlx5_flow_handle *rule; + struct mlx5_flow_attr *post_attr; + struct mlx5_flow_handle *post_rule; + struct mlx5e_post_act_handle *post_act_handle; }; -struct mlx5_sample_restore { +struct mlx5e_sample_restore { struct hlist_node hlist; struct mlx5_modify_hdr *modify_hdr; struct mlx5_flow_handle *rule; + struct mlx5e_post_act_handle *post_act_handle; u32 obj_id; int count; }; static int -sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) +sampler_termtbl_create(struct mlx5e_tc_psample *tc_psample) { - struct mlx5_core_dev *dev = esw_psample->priv->mdev; - struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_eswitch *esw = tc_psample->esw; struct mlx5_flow_table_attr ft_attr = {}; struct mlx5_flow_destination dest = {}; + struct mlx5_core_dev *dev = esw->dev; struct mlx5_flow_namespace *root_ns; struct mlx5_flow_act act = {}; int err; @@ -79,20 +84,20 @@ sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) ft_attr.prio = FDB_SLOW_PATH; ft_attr.max_fte = 1; ft_attr.level = 1; - esw_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); - if (IS_ERR(esw_psample->termtbl)) { - err = PTR_ERR(esw_psample->termtbl); + tc_psample->termtbl = mlx5_create_auto_grouped_flow_table(root_ns, &ft_attr); + if (IS_ERR(tc_psample->termtbl)) { + err = PTR_ERR(tc_psample->termtbl); mlx5_core_warn(dev, "failed to create termtbl, err: %d\n", err); return err; } act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; dest.vport.num = esw->manager_vport; - esw_psample->termtbl_rule = mlx5_add_flow_rules(esw_psample->termtbl, NULL, &act, &dest, 1); - if (IS_ERR(esw_psample->termtbl_rule)) { - err = PTR_ERR(esw_psample->termtbl_rule); + tc_psample->termtbl_rule = mlx5_add_flow_rules(tc_psample->termtbl, NULL, &act, &dest, 1); + if (IS_ERR(tc_psample->termtbl_rule)) { + err = PTR_ERR(tc_psample->termtbl_rule); mlx5_core_warn(dev, "failed to create termtbl rule, err: %d\n", err); - mlx5_destroy_flow_table(esw_psample->termtbl); + mlx5_destroy_flow_table(tc_psample->termtbl); return err; } @@ -100,14 +105,14 @@ sampler_termtbl_create(struct mlx5_esw_psample *esw_psample) } static void -sampler_termtbl_destroy(struct mlx5_esw_psample *esw_psample) +sampler_termtbl_destroy(struct mlx5e_tc_psample *tc_psample) { - mlx5_del_flow_rules(esw_psample->termtbl_rule); - mlx5_destroy_flow_table(esw_psample->termtbl); + mlx5_del_flow_rules(tc_psample->termtbl_rule); + mlx5_destroy_flow_table(tc_psample->termtbl); } static int -sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5_sampler *sampler) +sampler_obj_create(struct mlx5_core_dev *mdev, struct mlx5e_sampler *sampler) { u32 in[MLX5_ST_SZ_DW(create_sampler_obj_in)] = {}; u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)]; @@ -163,16 +168,16 @@ sampler_cmp(u32 sample_ratio1, u32 default_table_id1, u32 sample_ratio2, u32 def return sample_ratio1 != sample_ratio2 || default_table_id1 != default_table_id2; } -static struct mlx5_sampler * -sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_table_id) +static struct mlx5e_sampler * +sampler_get(struct mlx5e_tc_psample *tc_psample, u32 sample_ratio, u32 default_table_id) { - struct mlx5_sampler *sampler; + struct mlx5e_sampler *sampler; u32 hash_key; int err; - mutex_lock(&esw_psample->ht_lock); + mutex_lock(&tc_psample->ht_lock); 
hash_key = sampler_hash(sample_ratio, default_table_id); - hash_for_each_possible(esw_psample->hashtbl, sampler, hlist, hash_key) + hash_for_each_possible(tc_psample->hashtbl, sampler, hlist, hash_key) if (!sampler_cmp(sampler->sample_ratio, sampler->default_table_id, sample_ratio, default_table_id)) goto add_ref; @@ -183,42 +188,49 @@ sampler_get(struct mlx5_esw_psample *esw_psample, u32 sample_ratio, u32 default_ goto err_alloc; } - sampler->sample_table_id = esw_psample->termtbl->id; + sampler->sample_table_id = tc_psample->termtbl->id; sampler->default_table_id = default_table_id; sampler->sample_ratio = sample_ratio; - err = sampler_obj_create(esw_psample->priv->mdev, sampler); + err = sampler_obj_create(tc_psample->esw->dev, sampler); if (err) goto err_create; - hash_add(esw_psample->hashtbl, &sampler->hlist, hash_key); + hash_add(tc_psample->hashtbl, &sampler->hlist, hash_key); add_ref: sampler->count++; - mutex_unlock(&esw_psample->ht_lock); + mutex_unlock(&tc_psample->ht_lock); return sampler; err_create: kfree(sampler); err_alloc: - mutex_unlock(&esw_psample->ht_lock); + mutex_unlock(&tc_psample->ht_lock); return ERR_PTR(err); } static void -sampler_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sampler *sampler) +sampler_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sampler *sampler) { - mutex_lock(&esw_psample->ht_lock); + mutex_lock(&tc_psample->ht_lock); if (--sampler->count == 0) { hash_del(&sampler->hlist); - sampler_obj_destroy(esw_psample->priv->mdev, sampler->sampler_id); + sampler_obj_destroy(tc_psample->esw->dev, sampler->sampler_id); kfree(sampler); } - mutex_unlock(&esw_psample->ht_lock); + mutex_unlock(&tc_psample->ht_lock); } +/* obj_id is used to restore the sample parameters. + * Set fte_id in original flow table, then match it in the default table. + * Only set it for NICs can preserve reg_c or decap action. For other cases, + * use the same match in the default table. + * Use one header rewrite for both obj_id and fte_id. 
+ */ static struct mlx5_modify_hdr * -sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) +sample_modify_hdr_get(struct mlx5_core_dev *mdev, u32 obj_id, + struct mlx5e_post_act_handle *handle) { struct mlx5e_tc_mod_hdr_acts mod_acts = {}; struct mlx5_modify_hdr *modify_hdr; @@ -229,6 +241,12 @@ sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) if (err) goto err_set_regc0; + if (handle) { + err = mlx5e_tc_post_act_set_handle(mdev, handle, &mod_acts); + if (err) + goto err_post_act; + } + modify_hdr = mlx5_modify_header_alloc(mdev, MLX5_FLOW_NAMESPACE_FDB, mod_acts.num_actions, mod_acts.actions); @@ -241,23 +259,40 @@ sample_metadata_rule_get(struct mlx5_core_dev *mdev, u32 obj_id) return modify_hdr; err_modify_hdr: +err_post_act: dealloc_mod_hdr_actions(&mod_acts); err_set_regc0: return ERR_PTR(err); } -static struct mlx5_sample_restore * -sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) +static u32 +restore_hash(u32 obj_id, struct mlx5e_post_act_handle *post_act_handle) { - struct mlx5_core_dev *mdev = esw_psample->priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5_sample_restore *restore; + return jhash_2words(obj_id, hash32_ptr(post_act_handle), 0); +} + +static bool +restore_equal(struct mlx5e_sample_restore *restore, u32 obj_id, + struct mlx5e_post_act_handle *post_act_handle) +{ + return restore->obj_id == obj_id && restore->post_act_handle == post_act_handle; +} + +static struct mlx5e_sample_restore * +sample_restore_get(struct mlx5e_tc_psample *tc_psample, u32 obj_id, + struct mlx5e_post_act_handle *post_act_handle) +{ + struct mlx5_eswitch *esw = tc_psample->esw; + struct mlx5_core_dev *mdev = esw->dev; + struct mlx5e_sample_restore *restore; struct mlx5_modify_hdr *modify_hdr; + u32 hash_key; int err; - mutex_lock(&esw_psample->restore_lock); - hash_for_each_possible(esw_psample->restore_hashtbl, restore, hlist, obj_id) - if (restore->obj_id == obj_id) + mutex_lock(&tc_psample->restore_lock); + hash_key = restore_hash(obj_id, post_act_handle); + hash_for_each_possible(tc_psample->restore_hashtbl, restore, hlist, hash_key) + if (restore_equal(restore, obj_id, post_act_handle)) goto add_ref; restore = kzalloc(sizeof(*restore), GFP_KERNEL); @@ -266,8 +301,9 @@ sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) goto err_alloc; } restore->obj_id = obj_id; + restore->post_act_handle = post_act_handle; - modify_hdr = sample_metadata_rule_get(mdev, obj_id); + modify_hdr = sample_modify_hdr_get(mdev, obj_id, post_act_handle); if (IS_ERR(modify_hdr)) { err = PTR_ERR(modify_hdr); goto err_modify_hdr; @@ -280,10 +316,10 @@ sample_restore_get(struct mlx5_esw_psample *esw_psample, u32 obj_id) goto err_restore; } - hash_add(esw_psample->restore_hashtbl, &restore->hlist, obj_id); + hash_add(tc_psample->restore_hashtbl, &restore->hlist, hash_key); add_ref: restore->count++; - mutex_unlock(&esw_psample->restore_lock); + mutex_unlock(&tc_psample->restore_lock); return restore; err_restore: @@ -291,26 +327,26 @@ err_restore: err_modify_hdr: kfree(restore); err_alloc: - mutex_unlock(&esw_psample->restore_lock); + mutex_unlock(&tc_psample->restore_lock); return ERR_PTR(err); } static void -sample_restore_put(struct mlx5_esw_psample *esw_psample, struct mlx5_sample_restore *restore) +sample_restore_put(struct mlx5e_tc_psample *tc_psample, struct mlx5e_sample_restore *restore) { - mutex_lock(&esw_psample->restore_lock); + mutex_lock(&tc_psample->restore_lock); if (--restore->count == 0) hash_del(&restore->hlist); - 
mutex_unlock(&esw_psample->restore_lock); + mutex_unlock(&tc_psample->restore_lock); if (!restore->count) { mlx5_del_flow_rules(restore->rule); - mlx5_modify_header_dealloc(esw_psample->priv->mdev, restore->modify_hdr); + mlx5_modify_header_dealloc(tc_psample->esw->dev, restore->modify_hdr); kfree(restore); } } -void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) +void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj) { u32 trunc_size = mapped_obj->sample.trunc_size; struct psample_group psample_group = {}; @@ -325,6 +361,87 @@ void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj psample_sample_packet(&psample_group, skb, mapped_obj->sample.rate, &md); } +static int +add_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow, + struct mlx5_flow_spec *spec, struct mlx5_flow_attr *attr, + u32 *default_tbl_id) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + u32 attr_sz = ns_to_attr_sz(MLX5_FLOW_NAMESPACE_FDB); + struct mlx5_vport_tbl_attr per_vport_tbl_attr; + struct mlx5_flow_table *default_tbl; + struct mlx5_flow_attr *post_attr; + int err; + + /* Allocate default table per vport, chain and prio. Otherwise, there is + * only one default table for the same sampler object. Rules with different + * prio and chain may overlap. For CT sample action, per vport default + * table is needed to restore the metadata. + */ + per_vport_tbl_attr.chain = attr->chain; + per_vport_tbl_attr.prio = attr->prio; + per_vport_tbl_attr.vport = esw_attr->in_rep->vport; + per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr); + if (IS_ERR(default_tbl)) { + err = PTR_ERR(default_tbl); + goto err_default_tbl; + } + *default_tbl_id = default_tbl->id; + + post_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); + if (!post_attr) { + err = -ENOMEM; + goto err_attr; + } + sample_flow->post_attr = post_attr; + memcpy(post_attr, attr, attr_sz); + /* Perform the original matches on the default table. + * Offload all actions except the sample action. + */ + post_attr->chain = 0; + post_attr->prio = 0; + post_attr->ft = default_tbl; + post_attr->flags = MLX5_ESW_ATTR_FLAG_NO_IN_PORT; + + /* When offloading sample and encap action, if there is no valid + * neigh data struct, a slow path rule is offloaded first. Source + * port metadata match is set at that time. A per vport table is + * already allocated. No need to match it again. So clear the source + * port metadata match. 
+ */ + mlx5_eswitch_clear_rule_source_port(esw, spec); + sample_flow->post_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, post_attr); + if (IS_ERR(sample_flow->post_rule)) { + err = PTR_ERR(sample_flow->post_rule); + goto err_rule; + } + return 0; + +err_rule: + kfree(post_attr); +err_attr: + mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr); +err_default_tbl: + return err; +} + +static void +del_post_rule(struct mlx5_eswitch *esw, struct mlx5e_sample_flow *sample_flow, + struct mlx5_flow_attr *attr) +{ + struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; + struct mlx5_vport_tbl_attr tbl_attr; + + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, sample_flow->post_attr); + kfree(sample_flow->post_attr); + tbl_attr.chain = attr->chain; + tbl_attr.prio = attr->prio; + tbl_attr.vport = esw_attr->in_rep->vport; + tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + mlx5_esw_vporttbl_put(esw, &tbl_attr); +} + /* For the following typical flow table: * * +-------------------------------+ @@ -342,8 +459,9 @@ void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj * +---------------------+ * + original match + * +---------------------+ - * | - * v + * | set fte_id (if reg_c preserve cap) + * | do decap (if required) + * v * +------------------------------------------------+ * + Flow Sampler Object + * +------------------------------------------------+ @@ -353,80 +471,82 @@ void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj * +------------------------------------------------+ * | | * v v - * +-----------------------------+ +----------------------------------------+ - * + sample table + + default table per <vport, chain, prio> + - * +-----------------------------+ +----------------------------------------+ - * + forward to management vport + + original match + - * +-----------------------------+ +----------------------------------------+ - * + other actions + - * +----------------------------------------+ + * +-----------------------------+ +-------------------+ + * + sample table + + default table + + * +-----------------------------+ +-------------------+ + * + forward to management vport + | + * +-----------------------------+ | + * +-------+------+ + * | |reg_c preserve cap + * | |or decap action + * v v + * +-----------------+ +-------------+ + * + per vport table + + post action + + * +-----------------+ +-------------+ + * + original match + + * +-----------------+ + * + other actions + + * +-----------------+ */ struct mlx5_flow_handle * -mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_spec *spec, - struct mlx5_flow_attr *attr) + struct mlx5_flow_attr *attr, + u32 tunnel_id) { + struct mlx5e_post_act_handle *post_act_handle = NULL; struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - struct mlx5_vport_tbl_attr per_vport_tbl_attr; struct mlx5_esw_flow_attr *pre_esw_attr; struct mlx5_mapped_obj restore_obj = {}; - struct mlx5_sample_flow *sample_flow; - struct mlx5_sample_attr *sample_attr; - struct mlx5_flow_table *default_tbl; + struct mlx5e_sample_flow *sample_flow; + struct mlx5e_sample_attr *sample_attr; struct mlx5_flow_attr *pre_attr; struct mlx5_eswitch *esw; + u32 default_tbl_id; u32 obj_id; int err; - if (IS_ERR_OR_NULL(esw_psample)) + if (IS_ERR_OR_NULL(tc_psample)) return ERR_PTR(-EOPNOTSUPP); /* If slow path flag is set, eg. when the neigh is invalid for encap, * don't offload sample action. 
*/ - esw = esw_psample->priv->mdev->priv.eswitch; + esw = tc_psample->esw; if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) return mlx5_eswitch_add_offloaded_rule(esw, spec, attr); sample_flow = kzalloc(sizeof(*sample_flow), GFP_KERNEL); if (!sample_flow) return ERR_PTR(-ENOMEM); - esw_attr->sample->sample_flow = sample_flow; - - /* Allocate default table per vport, chain and prio. Otherwise, there is - * only one default table for the same sampler object. Rules with different - * prio and chain may overlap. For CT sample action, per vport default - * table is needed to resotre the metadata. - */ - per_vport_tbl_attr.chain = attr->chain; - per_vport_tbl_attr.prio = attr->prio; - per_vport_tbl_attr.vport = esw_attr->in_rep->vport; - per_vport_tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; - default_tbl = mlx5_esw_vporttbl_get(esw, &per_vport_tbl_attr); - if (IS_ERR(default_tbl)) { - err = PTR_ERR(default_tbl); - goto err_default_tbl; - } + sample_attr = attr->sample_attr; + sample_attr->sample_flow = sample_flow; - /* Perform the original matches on the default table. - * Offload all actions except the sample action. - */ - esw_attr->sample->sample_default_tbl = default_tbl; - /* When offloading sample and encap action, if there is no valid - * neigh data struct, a slow path rule is offloaded first. Source - * port metadata match is set at that time. A per vport table is - * already allocated. No need to match it again. So clear the source - * port metadata match. + /* For NICs with reg_c_preserve support or decap action, use + * post action instead of the per vport, chain and prio table. + * Only match the fte id instead of the same match in the + * original flow table. */ - mlx5_eswitch_clear_rule_source_port(esw, spec); - sample_flow->rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); - if (IS_ERR(sample_flow->rule)) { - err = PTR_ERR(sample_flow->rule); - goto err_offload_rule; + if (MLX5_CAP_GEN(esw->dev, reg_c_preserve) || + attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP) { + struct mlx5_flow_table *ft; + + ft = mlx5e_tc_post_act_get_ft(tc_psample->post_act); + default_tbl_id = ft->id; + post_act_handle = mlx5e_tc_post_act_add(tc_psample->post_act, attr); + if (IS_ERR(post_act_handle)) { + err = PTR_ERR(post_act_handle); + goto err_post_act; + } + sample_flow->post_act_handle = post_act_handle; + } else { + err = add_post_rule(esw, sample_flow, spec, attr, &default_tbl_id); + if (err) + goto err_post_rule; } /* Create sampler object. */ - sample_flow->sampler = sampler_get(esw_psample, esw_attr->sample->rate, default_tbl->id); + sample_flow->sampler = sampler_get(tc_psample, sample_attr->rate, default_tbl_id); if (IS_ERR(sample_flow->sampler)) { err = PTR_ERR(sample_flow->sampler); goto err_sampler; @@ -434,16 +554,17 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, /* Create an id mapping reg_c0 value to sample object. */ restore_obj.type = MLX5_MAPPED_OBJ_SAMPLE; - restore_obj.sample.group_id = esw_attr->sample->group_num; - restore_obj.sample.rate = esw_attr->sample->rate; - restore_obj.sample.trunc_size = esw_attr->sample->trunc_size; + restore_obj.sample.group_id = sample_attr->group_num; + restore_obj.sample.rate = sample_attr->rate; + restore_obj.sample.trunc_size = sample_attr->trunc_size; + restore_obj.sample.tunnel_id = tunnel_id; err = mapping_add(esw->offloads.reg_c0_obj_pool, &restore_obj, &obj_id); if (err) goto err_obj_id; - esw_attr->sample->restore_obj_id = obj_id; + sample_attr->restore_obj_id = obj_id; /* Create sample restore context. 
*/ - sample_flow->restore = sample_restore_get(esw_psample, obj_id); + sample_flow->restore = sample_restore_get(tc_psample, obj_id, post_act_handle); if (IS_ERR(sample_flow->restore)) { err = PTR_ERR(sample_flow->restore); goto err_sample_restore; @@ -455,21 +576,23 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, pre_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); if (!pre_attr) { err = -ENOMEM; - goto err_alloc_flow_attr; - } - sample_attr = kzalloc(sizeof(*sample_attr), GFP_KERNEL); - if (!sample_attr) { - err = -ENOMEM; - goto err_alloc_sample_attr; + goto err_alloc_pre_flow_attr; } - pre_esw_attr = pre_attr->esw_attr; pre_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + /* For decap action, do decap in the original flow table instead of the + * default flow table. + */ + if (tunnel_id) + pre_attr->action |= MLX5_FLOW_CONTEXT_ACTION_DECAP; pre_attr->modify_hdr = sample_flow->restore->modify_hdr; pre_attr->flags = MLX5_ESW_ATTR_FLAG_SAMPLE; + pre_attr->inner_match_level = attr->inner_match_level; + pre_attr->outer_match_level = attr->outer_match_level; pre_attr->chain = attr->chain; pre_attr->prio = attr->prio; - pre_esw_attr->sample = sample_attr; - pre_esw_attr->sample->sampler_id = sample_flow->sampler->sampler_id; + pre_attr->sample_attr = attr->sample_attr; + sample_attr->sampler_id = sample_flow->sampler->sampler_id; + pre_esw_attr = pre_attr->esw_attr; pre_esw_attr->in_mdev = esw_attr->in_mdev; pre_esw_attr->in_rep = esw_attr->in_rep; sample_flow->pre_rule = mlx5_eswitch_add_offloaded_rule(esw, spec, pre_attr); @@ -479,108 +602,113 @@ mlx5_esw_sample_offload(struct mlx5_esw_psample *esw_psample, } sample_flow->pre_attr = pre_attr; - return sample_flow->rule; + return sample_flow->post_rule; err_pre_offload_rule: - kfree(sample_attr); -err_alloc_sample_attr: kfree(pre_attr); -err_alloc_flow_attr: - sample_restore_put(esw_psample, sample_flow->restore); +err_alloc_pre_flow_attr: + sample_restore_put(tc_psample, sample_flow->restore); err_sample_restore: mapping_remove(esw->offloads.reg_c0_obj_pool, obj_id); err_obj_id: - sampler_put(esw_psample, sample_flow->sampler); + sampler_put(tc_psample, sample_flow->sampler); err_sampler: - /* For sample offload, rule is added in default_tbl. No need to call - * mlx5_esw_chains_put_table() - */ - attr->prio = 0; - attr->chain = 0; - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr); -err_offload_rule: - mlx5_esw_vporttbl_put(esw, &per_vport_tbl_attr); -err_default_tbl: + if (!post_act_handle) + del_post_rule(esw, sample_flow, attr); +err_post_rule: + if (post_act_handle) + mlx5e_tc_post_act_del(tc_psample->post_act, post_act_handle); +err_post_act: kfree(sample_flow); return ERR_PTR(err); } void -mlx5_esw_sample_unoffload(struct mlx5_esw_psample *esw_psample, +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *tc_psample, struct mlx5_flow_handle *rule, struct mlx5_flow_attr *attr) { struct mlx5_esw_flow_attr *esw_attr = attr->esw_attr; - struct mlx5_sample_flow *sample_flow; + struct mlx5e_sample_flow *sample_flow; struct mlx5_vport_tbl_attr tbl_attr; - struct mlx5_flow_attr *pre_attr; struct mlx5_eswitch *esw; - if (IS_ERR_OR_NULL(esw_psample)) + if (IS_ERR_OR_NULL(tc_psample)) return; /* If slow path flag is set, sample action is not offloaded. * No need to delete sample rule. 
*/ - esw = esw_psample->priv->mdev->priv.eswitch; + esw = tc_psample->esw; if (attr->flags & MLX5_ESW_ATTR_FLAG_SLOW_PATH) { mlx5_eswitch_del_offloaded_rule(esw, rule, attr); return; } - sample_flow = esw_attr->sample->sample_flow; - pre_attr = sample_flow->pre_attr; - memset(pre_attr, 0, sizeof(*pre_attr)); - esw = esw_psample->priv->mdev->priv.eswitch; - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, pre_attr); - mlx5_eswitch_del_offloaded_rule(esw, sample_flow->rule, attr); - - sample_restore_put(esw_psample, sample_flow->restore); - mapping_remove(esw->offloads.reg_c0_obj_pool, esw_attr->sample->restore_obj_id); - sampler_put(esw_psample, sample_flow->sampler); - tbl_attr.chain = attr->chain; - tbl_attr.prio = attr->prio; - tbl_attr.vport = esw_attr->in_rep->vport; - tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; - mlx5_esw_vporttbl_put(esw, &tbl_attr); + /* The following delete order can't be changed, otherwise, + * will hit fw syndromes. + */ + sample_flow = attr->sample_attr->sample_flow; + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->pre_rule, sample_flow->pre_attr); + if (!sample_flow->post_act_handle) + mlx5_eswitch_del_offloaded_rule(esw, sample_flow->post_rule, + sample_flow->post_attr); + + sample_restore_put(tc_psample, sample_flow->restore); + mapping_remove(esw->offloads.reg_c0_obj_pool, attr->sample_attr->restore_obj_id); + sampler_put(tc_psample, sample_flow->sampler); + if (sample_flow->post_act_handle) { + mlx5e_tc_post_act_del(tc_psample->post_act, sample_flow->post_act_handle); + } else { + tbl_attr.chain = attr->chain; + tbl_attr.prio = attr->prio; + tbl_attr.vport = esw_attr->in_rep->vport; + tbl_attr.vport_ns = &mlx5_esw_vport_tbl_sample_ns; + mlx5_esw_vporttbl_put(esw, &tbl_attr); + kfree(sample_flow->post_attr); + } - kfree(pre_attr->esw_attr->sample); - kfree(pre_attr); + kfree(sample_flow->pre_attr); kfree(sample_flow); } -struct mlx5_esw_psample * -mlx5_esw_sample_init(struct mlx5e_priv *priv) +struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act) { - struct mlx5_esw_psample *esw_psample; + struct mlx5e_tc_psample *tc_psample; int err; - esw_psample = kzalloc(sizeof(*esw_psample), GFP_KERNEL); - if (!esw_psample) + tc_psample = kzalloc(sizeof(*tc_psample), GFP_KERNEL); + if (!tc_psample) return ERR_PTR(-ENOMEM); - esw_psample->priv = priv; - err = sampler_termtbl_create(esw_psample); + if (IS_ERR_OR_NULL(post_act)) { + err = PTR_ERR(post_act); + goto err_post_act; + } + tc_psample->post_act = post_act; + tc_psample->esw = esw; + err = sampler_termtbl_create(tc_psample); if (err) - goto err_termtbl; + goto err_post_act; - mutex_init(&esw_psample->ht_lock); - mutex_init(&esw_psample->restore_lock); + mutex_init(&tc_psample->ht_lock); + mutex_init(&tc_psample->restore_lock); - return esw_psample; + return tc_psample; -err_termtbl: - kfree(esw_psample); +err_post_act: + kfree(tc_psample); return ERR_PTR(err); } void -mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample) +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample) { - if (IS_ERR_OR_NULL(esw_psample)) + if (IS_ERR_OR_NULL(tc_psample)) return; - mutex_destroy(&esw_psample->restore_lock); - mutex_destroy(&esw_psample->ht_lock); - sampler_termtbl_destroy(esw_psample); - kfree(esw_psample); + mutex_destroy(&tc_psample->restore_lock); + mutex_destroy(&tc_psample->ht_lock); + sampler_termtbl_destroy(tc_psample); + kfree(tc_psample); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h 
b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h new file mode 100644 index 000000000000..db0146df9b30 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/sample.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021 Mellanox Technologies. */ + +#ifndef __MLX5_EN_TC_SAMPLE_H__ +#define __MLX5_EN_TC_SAMPLE_H__ + +#include "eswitch.h" + +struct mlx5_flow_attr; +struct mlx5e_tc_psample; +struct mlx5e_post_act; + +struct mlx5e_sample_attr { + u32 group_num; + u32 rate; + u32 trunc_size; + u32 restore_obj_id; + u32 sampler_id; + struct mlx5e_sample_flow *sample_flow; +}; + +void mlx5e_tc_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); + +struct mlx5_flow_handle * +mlx5e_tc_sample_offload(struct mlx5e_tc_psample *sample_priv, + struct mlx5_flow_spec *spec, + struct mlx5_flow_attr *attr, + u32 tunnel_id); + +void +mlx5e_tc_sample_unoffload(struct mlx5e_tc_psample *sample_priv, + struct mlx5_flow_handle *rule, + struct mlx5_flow_attr *attr); + +struct mlx5e_tc_psample * +mlx5e_tc_sample_init(struct mlx5_eswitch *esw, struct mlx5e_post_act *post_act); + +void +mlx5e_tc_sample_cleanup(struct mlx5e_tc_psample *tc_psample); + +#endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index 91e7a01e32be..6c949abcd2e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -19,6 +19,7 @@ #include "en/tc_ct.h" #include "en/mod_hdr.h" #include "en/mapping.h" +#include "en/tc/post_act.h" #include "en.h" #include "en_tc.h" #include "en_rep.h" @@ -32,10 +33,6 @@ #define MLX5_CT_STATE_RELATED_BIT BIT(5) #define MLX5_CT_STATE_INVALID_BIT BIT(6) -#define MLX5_FTE_ID_BITS (mlx5e_tc_attr_to_reg_mappings[FTEID_TO_REG].mlen) -#define MLX5_FTE_ID_MAX GENMASK(MLX5_FTE_ID_BITS - 1, 0) -#define MLX5_FTE_ID_MASK MLX5_FTE_ID_MAX - #define MLX5_CT_LABELS_BITS (mlx5e_tc_attr_to_reg_mappings[LABELS_TO_REG].mlen) #define MLX5_CT_LABELS_MASK GENMASK(MLX5_CT_LABELS_BITS - 1, 0) @@ -46,14 +43,13 @@ struct mlx5_tc_ct_priv { struct mlx5_core_dev *dev; const struct net_device *netdev; struct mod_hdr_tbl *mod_hdr_tbl; - struct idr fte_ids; struct xarray tuple_ids; struct rhashtable zone_ht; struct rhashtable ct_tuples_ht; struct rhashtable ct_tuples_nat_ht; struct mlx5_flow_table *ct; struct mlx5_flow_table *ct_nat; - struct mlx5_flow_table *post_ct; + struct mlx5e_post_act *post_act; struct mutex control_lock; /* guards parallel adds/dels */ struct mapping_ctx *zone_mapping; struct mapping_ctx *labels_mapping; @@ -64,11 +60,9 @@ struct mlx5_tc_ct_priv { struct mlx5_ct_flow { struct mlx5_flow_attr *pre_ct_attr; - struct mlx5_flow_attr *post_ct_attr; struct mlx5_flow_handle *pre_ct_rule; - struct mlx5_flow_handle *post_ct_rule; + struct mlx5e_post_act_handle *post_act_handle; struct mlx5_ct_ft *ft; - u32 fte_id; u32 chain_mapping; }; @@ -768,7 +762,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; attr->dest_chain = 0; - attr->dest_ft = ct_priv->post_ct; + attr->dest_ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act); attr->ft = nat ? 
ct_priv->ct_nat : ct_priv->ct; attr->outer_match_level = MLX5_MATCH_L4; attr->counter = entry->counter->counter; @@ -1432,7 +1426,7 @@ static int tc_ct_pre_ct_add_rules(struct mlx5_ct_ft *ct_ft, ctstate |= MLX5_CT_STATE_NAT_BIT; mlx5e_tc_match_to_reg_match(spec, CTSTATE_TO_REG, ctstate, ctstate); - dest.ft = ct_priv->post_ct; + dest.ft = mlx5e_tc_post_act_get_ft(ct_priv->post_act); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1716,9 +1710,9 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * | do decap * v * +---------------------+ - * + pre_ct/pre_ct_nat + if matches +---------------------+ - * + zone+nat match +---------------->+ post_ct (see below) + - * +---------------------+ set zone +---------------------+ + * + pre_ct/pre_ct_nat + if matches +-------------------------+ + * + zone+nat match +---------------->+ post_act (see below) + + * +---------------------+ set zone +-------------------------+ * | set zone * v * +--------------------+ @@ -1732,7 +1726,7 @@ mlx5_tc_ct_del_ft_cb(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_ft *ft) * | do nat (if needed) * v * +--------------+ - * + post_ct + original filter actions + * + post_act + original filter actions * + fte_id match +------------------------> * +--------------+ */ @@ -1746,19 +1740,15 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, struct mlx5e_priv *priv = netdev_priv(ct_priv->netdev); struct mlx5e_tc_mod_hdr_acts pre_mod_acts = {}; u32 attr_sz = ns_to_attr_sz(ct_priv->ns_type); - struct mlx5_flow_spec *post_ct_spec = NULL; + struct mlx5e_post_act_handle *handle; struct mlx5_flow_attr *pre_ct_attr; struct mlx5_modify_hdr *mod_hdr; - struct mlx5_flow_handle *rule; struct mlx5_ct_flow *ct_flow; int chain_mapping = 0, err; struct mlx5_ct_ft *ft; - u32 fte_id = 1; - post_ct_spec = kvzalloc(sizeof(*post_ct_spec), GFP_KERNEL); ct_flow = kzalloc(sizeof(*ct_flow), GFP_KERNEL); - if (!post_ct_spec || !ct_flow) { - kvfree(post_ct_spec); + if (!ct_flow) { kfree(ct_flow); return ERR_PTR(-ENOMEM); } @@ -1773,14 +1763,13 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } ct_flow->ft = ft; - err = idr_alloc_u32(&ct_priv->fte_ids, ct_flow, &fte_id, - MLX5_FTE_ID_MAX, GFP_KERNEL); - if (err) { - netdev_warn(priv->netdev, - "Failed to allocate fte id, err: %d\n", err); - goto err_idr; + handle = mlx5e_tc_post_act_add(ct_priv->post_act, attr); + if (IS_ERR(handle)) { + err = PTR_ERR(handle); + ct_dbg("Failed to allocate post action handle"); + goto err_post_act_handle; } - ct_flow->fte_id = fte_id; + ct_flow->post_act_handle = handle; /* Base flow attributes of both rules on original rule attribute */ ct_flow->pre_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type); @@ -1789,15 +1778,8 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, goto err_alloc_pre; } - ct_flow->post_ct_attr = mlx5_alloc_flow_attr(ct_priv->ns_type); - if (!ct_flow->post_ct_attr) { - err = -ENOMEM; - goto err_alloc_post; - } - pre_ct_attr = ct_flow->pre_ct_attr; memcpy(pre_ct_attr, attr, attr_sz); - memcpy(ct_flow->post_ct_attr, attr, attr_sz); /* Modify the original rule's action to fwd and modify, leave decap */ pre_ct_attr->action = attr->action & MLX5_FLOW_CONTEXT_ACTION_DECAP; @@ -1823,10 +1805,9 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, goto err_mapping; } - err = mlx5e_tc_match_to_reg_set(priv->mdev, &pre_mod_acts, ct_priv->ns_type, - FTEID_TO_REG, fte_id); + err = mlx5e_tc_post_act_set_handle(priv->mdev, handle, 
&pre_mod_acts); if (err) { - ct_dbg("Failed to set fte_id register mapping"); + ct_dbg("Failed to set post action handle"); goto err_mapping; } @@ -1857,33 +1838,6 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, } pre_ct_attr->modify_hdr = mod_hdr; - /* Post ct rule matches on fte_id and executes original rule's - * tc rule action - */ - mlx5e_tc_match_to_reg_match(post_ct_spec, FTEID_TO_REG, - fte_id, MLX5_FTE_ID_MASK); - - /* Put post_ct rule on post_ct flow table */ - ct_flow->post_ct_attr->chain = 0; - ct_flow->post_ct_attr->prio = 0; - ct_flow->post_ct_attr->ft = ct_priv->post_ct; - - /* Splits were handled before CT */ - if (ct_priv->ns_type == MLX5_FLOW_NAMESPACE_FDB) - ct_flow->post_ct_attr->esw_attr->split_count = 0; - - ct_flow->post_ct_attr->inner_match_level = MLX5_MATCH_NONE; - ct_flow->post_ct_attr->outer_match_level = MLX5_MATCH_NONE; - ct_flow->post_ct_attr->action &= ~(MLX5_FLOW_CONTEXT_ACTION_DECAP); - rule = mlx5_tc_rule_insert(priv, post_ct_spec, - ct_flow->post_ct_attr); - ct_flow->post_ct_rule = rule; - if (IS_ERR(ct_flow->post_ct_rule)) { - err = PTR_ERR(ct_flow->post_ct_rule); - ct_dbg("Failed to add post ct rule"); - goto err_insert_post_ct; - } - /* Change original rule point to ct table */ pre_ct_attr->dest_chain = 0; pre_ct_attr->dest_ft = nat ? ft->pre_ct_nat.ft : ft->pre_ct.ft; @@ -1897,28 +1851,21 @@ __mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *ct_priv, attr->ct_attr.ct_flow = ct_flow; dealloc_mod_hdr_actions(&pre_mod_acts); - kvfree(post_ct_spec); - return rule; + return ct_flow->pre_ct_rule; err_insert_orig: - mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule, - ct_flow->post_ct_attr); -err_insert_post_ct: mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); err_mapping: dealloc_mod_hdr_actions(&pre_mod_acts); mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); err_get_chain: - kfree(ct_flow->post_ct_attr); -err_alloc_post: kfree(ct_flow->pre_ct_attr); err_alloc_pre: - idr_remove(&ct_priv->fte_ids, fte_id); -err_idr: + mlx5e_tc_post_act_del(ct_priv->post_act, handle); +err_post_act_handle: mlx5_tc_ct_del_ft_cb(ct_priv, ft); err_ft: - kvfree(post_ct_spec); kfree(ct_flow); netdev_warn(priv->netdev, "Failed to offload ct flow, err %d\n", err); return ERR_PTR(err); @@ -2029,16 +1976,13 @@ __mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *ct_priv, pre_ct_attr); mlx5_modify_header_dealloc(priv->mdev, pre_ct_attr->modify_hdr); - if (ct_flow->post_ct_rule) { - mlx5_tc_rule_delete(priv, ct_flow->post_ct_rule, - ct_flow->post_ct_attr); + if (ct_flow->post_act_handle) { mlx5_chains_put_chain_mapping(ct_priv->chains, ct_flow->chain_mapping); - idr_remove(&ct_priv->fte_ids, ct_flow->fte_id); + mlx5e_tc_post_act_del(ct_priv->post_act, ct_flow->post_act_handle); mlx5_tc_ct_del_ft_cb(ct_priv, ct_flow->ft); } kfree(ct_flow->pre_ct_attr); - kfree(ct_flow->post_ct_attr); kfree(ct_flow); } @@ -2064,11 +2008,6 @@ static int mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw, const char **err_msg) { - if (!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ignore_flow_level)) { - *err_msg = "firmware level support is missing"; - return -EOPNOTSUPP; - } - if (!mlx5_eswitch_vlan_actions_supported(esw->dev, 1)) { /* vlan workaround should be avoided for multi chain rules. 
* This is just a sanity check as pop vlan action should @@ -2098,20 +2037,9 @@ mlx5_tc_ct_init_check_esw_support(struct mlx5_eswitch *esw, } static int -mlx5_tc_ct_init_check_nic_support(struct mlx5e_priv *priv, - const char **err_msg) -{ - if (!MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ignore_flow_level)) { - *err_msg = "firmware level support is missing"; - return -EOPNOTSUPP; - } - - return 0; -} - -static int mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act, const char **err_msg) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -2122,10 +2050,14 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, *err_msg = "tc skb extension missing"; return -EOPNOTSUPP; #endif + if (IS_ERR_OR_NULL(post_act)) { + *err_msg = "tc ct offload not supported, post action is missing"; + return -EOPNOTSUPP; + } + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) return mlx5_tc_ct_init_check_esw_support(esw, err_msg); - else - return mlx5_tc_ct_init_check_nic_support(priv, err_msg); + return 0; } #define INIT_ERR_PREFIX "tc ct offload init failed" @@ -2133,19 +2065,19 @@ mlx5_tc_ct_init_check_support(struct mlx5e_priv *priv, struct mlx5_tc_ct_priv * mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, struct mod_hdr_tbl *mod_hdr, - enum mlx5_flow_namespace_type ns_type) + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act) { struct mlx5_tc_ct_priv *ct_priv; struct mlx5_core_dev *dev; const char *msg; + u64 mapping_id; int err; dev = priv->mdev; - err = mlx5_tc_ct_init_check_support(priv, ns_type, &msg); + err = mlx5_tc_ct_init_check_support(priv, ns_type, post_act, &msg); if (err) { - mlx5_core_warn(dev, - "tc ct offload not supported, %s\n", - msg); + mlx5_core_warn(dev, "tc ct offload not supported, %s\n", msg); goto err_support; } @@ -2153,13 +2085,17 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, if (!ct_priv) goto err_alloc; - ct_priv->zone_mapping = mapping_create(sizeof(u16), 0, true); + mapping_id = mlx5_query_nic_system_image_guid(dev); + + ct_priv->zone_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_ZONE, + sizeof(u16), 0, true); if (IS_ERR(ct_priv->zone_mapping)) { err = PTR_ERR(ct_priv->zone_mapping); goto err_mapping_zone; } - ct_priv->labels_mapping = mapping_create(sizeof(u32) * 4, 0, true); + ct_priv->labels_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_LABELS, + sizeof(u32) * 4, 0, true); if (IS_ERR(ct_priv->labels_mapping)) { err = PTR_ERR(ct_priv->labels_mapping); goto err_mapping_labels; @@ -2189,16 +2125,7 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, goto err_ct_nat_tbl; } - ct_priv->post_ct = mlx5_chains_create_global_table(chains); - if (IS_ERR(ct_priv->post_ct)) { - err = PTR_ERR(ct_priv->post_ct); - mlx5_core_warn(dev, - "%s, failed to create post ct table err: %d\n", - INIT_ERR_PREFIX, err); - goto err_post_ct_tbl; - } - - idr_init(&ct_priv->fte_ids); + ct_priv->post_act = post_act; mutex_init(&ct_priv->control_lock); rhashtable_init(&ct_priv->zone_ht, &zone_params); rhashtable_init(&ct_priv->ct_tuples_ht, &tuples_ht_params); @@ -2206,8 +2133,6 @@ mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, return ct_priv; -err_post_ct_tbl: - mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); err_ct_nat_tbl: mlx5_chains_destroy_global_table(chains, ct_priv->ct); err_ct_tbl: @@ -2232,7 +2157,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) chains = ct_priv->chains; 
- mlx5_chains_destroy_global_table(chains, ct_priv->post_ct); mlx5_chains_destroy_global_table(chains, ct_priv->ct_nat); mlx5_chains_destroy_global_table(chains, ct_priv->ct); mapping_destroy(ct_priv->zone_mapping); @@ -2242,7 +2166,6 @@ mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv) rhashtable_destroy(&ct_priv->ct_tuples_nat_ht); rhashtable_destroy(&ct_priv->zone_ht); mutex_destroy(&ct_priv->control_lock); - idr_destroy(&ct_priv->fte_ids); kfree(ct_priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 644cf1641cde..363329f4aac6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -92,7 +92,8 @@ struct mlx5_ct_attr { struct mlx5_tc_ct_priv * mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, struct mod_hdr_tbl *mod_hdr, - enum mlx5_flow_namespace_type ns_type); + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act); void mlx5_tc_ct_clean(struct mlx5_tc_ct_priv *ct_priv); @@ -132,7 +133,8 @@ mlx5e_tc_ct_restore_flow(struct mlx5_tc_ct_priv *ct_priv, static inline struct mlx5_tc_ct_priv * mlx5_tc_ct_init(struct mlx5e_priv *priv, struct mlx5_fs_chains *chains, struct mod_hdr_tbl *mod_hdr, - enum mlx5_flow_namespace_type ns_type) + enum mlx5_flow_namespace_type ns_type, + struct mlx5e_post_act *post_act) { return NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c index 1e2d117082d4..b4e986818794 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -525,7 +525,7 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, e->out_dev = attr.out_dev; e->route_dev_ifindex = attr.route_dev->ifindex; - /* It's importent to add the neigh to the hash table before checking + /* It's important to add the neigh to the hash table before checking * the neigh validity state. So if we'll get a notification, in case the * neigh changes it's validity state, we would find the relevant neigh * in the hash. diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c index 2e846b741280..1c44c6c345f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun_encap.c @@ -147,7 +147,7 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, mlx5e_rep_queue_neigh_stats_work(priv); list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow)) + if (!mlx5e_is_offloaded_flow(flow) || !flow_flag_test(flow, SLOW)) continue; attr = flow->attr; esw_attr = attr->esw_attr; @@ -188,7 +188,7 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, int err; list_for_each_entry(flow, flow_list, tmp_list) { - if (!mlx5e_is_offloaded_flow(flow)) + if (!mlx5e_is_offloaded_flow(flow) || flow_flag_test(flow, SLOW)) continue; attr = flow->attr; esw_attr = attr->esw_attr; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c new file mode 100644 index 000000000000..de936dc4bc48 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.c @@ -0,0 +1,200 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#include "tir.h" +#include "params.h" +#include <linux/mlx5/transobj.h> + +#define MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ (64 * 1024) + +/* max() doesn't work inside square brackets. */ +#define MLX5E_TIR_CMD_IN_SZ_DW ( \ + MLX5_ST_SZ_DW(create_tir_in) > MLX5_ST_SZ_DW(modify_tir_in) ? \ + MLX5_ST_SZ_DW(create_tir_in) : MLX5_ST_SZ_DW(modify_tir_in) \ +) + +struct mlx5e_tir_builder { + u32 in[MLX5E_TIR_CMD_IN_SZ_DW]; + bool modify; +}; + +struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify) +{ + struct mlx5e_tir_builder *builder; + + builder = kvzalloc(sizeof(*builder), GFP_KERNEL); + builder->modify = modify; + + return builder; +} + +void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder) +{ + kvfree(builder); +} + +void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder) +{ + memset(builder->in, 0, sizeof(builder->in)); +} + +static void *mlx5e_tir_builder_get_tirc(struct mlx5e_tir_builder *builder) +{ + if (builder->modify) + return MLX5_ADDR_OF(modify_tir_in, builder->in, ctx); + return MLX5_ADDR_OF(create_tir_in, builder->in, ctx); +} + +void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, transport_domain, tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); + MLX5_SET(tirc, tirc, inline_rqn, rqn); +} + +void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn, + u32 rqtn, bool inner_ft_support) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, transport_domain, tdn); + MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); + MLX5_SET(tirc, tirc, indirect_table, rqtn); + MLX5_SET(tirc, tirc, tunneled_offload_en, inner_ft_support); +} + +void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, + const struct mlx5e_lro_param *lro_param) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + const unsigned int rough_max_l2_l3_hdr_sz = 256; + + if (builder->modify) + MLX5_SET(modify_tir_in, builder->in, bitmask.lro, 1); + + if (!lro_param->enabled) + return; + + MLX5_SET(tirc, tirc, lro_enable_mask, + MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | + MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); + MLX5_SET(tirc, tirc, lro_max_ip_payload_size, + (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - rough_max_l2_l3_hdr_sz) >> 8); + MLX5_SET(tirc, tirc, lro_timeout_period_usecs, lro_param->timeout); +} + +static int mlx5e_hfunc_to_hw(u8 hfunc) +{ + switch (hfunc) { + case ETH_RSS_HASH_TOP: + return MLX5_RX_HASH_FN_TOEPLITZ; + case ETH_RSS_HASH_XOR: + return MLX5_RX_HASH_FN_INVERTED_XOR8; + default: + return MLX5_RX_HASH_FN_NONE; + } +} + +void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, + const struct mlx5e_rss_params_hash *rss_hash, + const struct mlx5e_rss_params_traffic_type *rss_tt, + bool inner) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + void *hfso; + + if (builder->modify) + MLX5_SET(modify_tir_in, builder->in, bitmask.hash, 1); + + MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_hfunc_to_hw(rss_hash->hfunc)); + if (rss_hash->hfunc == ETH_RSS_HASH_TOP) { + const size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); + void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); + + MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); + memcpy(rss_key, rss_hash->toeplitz_hash_key, len); + } + + if (inner) + hfso = MLX5_ADDR_OF(tirc, tirc, 
rx_hash_field_selector_inner); + else + hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, rss_tt->l3_prot_type); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, rss_tt->l4_prot_type); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, rss_tt->rx_hash_fields); +} + +void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); +} + +void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder) +{ + void *tirc = mlx5e_tir_builder_get_tirc(builder); + + WARN_ON(builder->modify); + + MLX5_SET(tirc, tirc, tls_en, 1); + MLX5_SET(tirc, tirc, self_lb_block, + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | + MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); +} + +int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder, + struct mlx5_core_dev *mdev, bool reg) +{ + int err; + + tir->mdev = mdev; + + err = mlx5_core_create_tir(tir->mdev, builder->in, &tir->tirn); + if (err) + return err; + + if (reg) { + struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs; + + mutex_lock(&res->td.list_lock); + list_add(&tir->list, &res->td.tirs_list); + mutex_unlock(&res->td.list_lock); + } else { + INIT_LIST_HEAD(&tir->list); + } + + return 0; +} + +void mlx5e_tir_destroy(struct mlx5e_tir *tir) +{ + struct mlx5e_hw_objs *res = &tir->mdev->mlx5e_res.hw_objs; + + /* Skip mutex if list_del is no-op (the TIR wasn't registered in the + * list). list_empty will never return true for an item of tirs_list, + * and READ_ONCE/WRITE_ONCE in list_empty/list_del guarantee consistency + * of the list->next value. + */ + if (!list_empty(&tir->list)) { + mutex_lock(&res->td.list_lock); + list_del(&tir->list); + mutex_unlock(&res->td.list_lock); + } + + mlx5_core_destroy_tir(tir->mdev, tir->tirn); +} + +int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder) +{ + return mlx5_core_modify_tir(tir->mdev, tir->tirn, builder->in); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h new file mode 100644 index 000000000000..e45149a78ed9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tir.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, Mellanox Technologies inc. All rights reserved. 
*/ + +#ifndef __MLX5_EN_TIR_H__ +#define __MLX5_EN_TIR_H__ + +#include <linux/kernel.h> + +struct mlx5e_rss_params_hash { + u8 hfunc; + u8 toeplitz_hash_key[40]; +}; + +struct mlx5e_rss_params_traffic_type { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +struct mlx5e_tir_builder; +struct mlx5e_lro_param; + +struct mlx5e_tir_builder *mlx5e_tir_builder_alloc(bool modify); +void mlx5e_tir_builder_free(struct mlx5e_tir_builder *builder); +void mlx5e_tir_builder_clear(struct mlx5e_tir_builder *builder); + +void mlx5e_tir_builder_build_inline(struct mlx5e_tir_builder *builder, u32 tdn, u32 rqn); +void mlx5e_tir_builder_build_rqt(struct mlx5e_tir_builder *builder, u32 tdn, + u32 rqtn, bool inner_ft_support); +void mlx5e_tir_builder_build_lro(struct mlx5e_tir_builder *builder, + const struct mlx5e_lro_param *lro_param); +void mlx5e_tir_builder_build_rss(struct mlx5e_tir_builder *builder, + const struct mlx5e_rss_params_hash *rss_hash, + const struct mlx5e_rss_params_traffic_type *rss_tt, + bool inner); +void mlx5e_tir_builder_build_direct(struct mlx5e_tir_builder *builder); +void mlx5e_tir_builder_build_tls(struct mlx5e_tir_builder *builder); + +struct mlx5_core_dev; + +struct mlx5e_tir { + struct mlx5_core_dev *mdev; + u32 tirn; + struct list_head list; +}; + +int mlx5e_tir_init(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder, + struct mlx5_core_dev *mdev, bool reg); +void mlx5e_tir_destroy(struct mlx5e_tir *tir); + +static inline u32 mlx5e_tir_get_tirn(struct mlx5e_tir *tir) +{ + return tir->tirn; +} + +int mlx5e_tir_modify(struct mlx5e_tir *tir, struct mlx5e_tir_builder *builder); + +#endif /* __MLX5_EN_TIR_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c index 7f94508594fb..d54607a42740 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/trap.c @@ -92,30 +92,19 @@ static void mlx5e_close_trap_rq(struct mlx5e_rq *rq) static int mlx5e_create_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqn) { - void *tirc; - int inlen; - u32 *in; + struct mlx5e_tir_builder *builder; int err; - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + builder = mlx5e_tir_builder_alloc(false); + if (!builder) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_NONE); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); - MLX5_SET(tirc, tirc, inline_rqn, rqn); - err = mlx5e_create_tir(mdev, tir, in); - kvfree(in); + mlx5e_tir_builder_build_inline(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqn); + err = mlx5e_tir_init(tir, builder, mdev, true); - return err; -} + mlx5e_tir_builder_free(builder); -static void mlx5e_destroy_trap_direct_rq_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir) -{ - mlx5e_destroy_tir(mdev, tir); + return err; } static void mlx5e_build_trap_params(struct mlx5_core_dev *mdev, @@ -173,7 +162,7 @@ err_napi_del: void mlx5e_close_trap(struct mlx5e_trap *trap) { - mlx5e_destroy_trap_direct_rq_tir(trap->mdev, &trap->tir); + mlx5e_tir_destroy(&trap->tir); mlx5e_close_trap_rq(&trap->rq); netif_napi_del(&trap->napi); kvfree(trap); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c index 71e8d66fa150..7b562d2c8a19 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/pool.c @@ -122,7 +122,7 @@ static int mlx5e_xsk_enable_locked(struct mlx5e_priv *priv, * any Fill Ring entries at the setup stage. */ - err = mlx5e_xsk_redirect_rqt_to_channel(priv, priv->channels.c[ix]); + err = mlx5e_rx_res_xsk_activate(priv->rx_res, &priv->channels, ix); if (unlikely(err)) goto err_deactivate; @@ -169,7 +169,7 @@ static int mlx5e_xsk_disable_locked(struct mlx5e_priv *priv, u16 ix) goto remove_pool; c = priv->channels.c[ix]; - mlx5e_xsk_redirect_rqt_to_drop(priv, ix); + mlx5e_rx_res_xsk_deactivate(priv->rx_res, ix); mlx5e_deactivate_xsk(c); mlx5e_close_xsk(c); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index a8315f166696..538bc2419bd8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -126,7 +126,7 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, /* Create a separate SQ, so that when the buff pool is disabled, we could * close this SQ safely and stop receiving CQEs. In other case, e.g., if * the XDPSQ was used instead, we might run into trouble when the buff pool - * is disabled and then reenabled, but the SQ continues receiving CQEs + * is disabled and then re-enabled, but the SQ continues receiving CQEs * from the old buff pool. */ err = mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, pool, &c->xsksq, true); @@ -183,73 +183,3 @@ void mlx5e_deactivate_xsk(struct mlx5e_channel *c) mlx5e_deactivate_rq(&c->xskrq); /* TX queue is disabled on close. */ } - -static int mlx5e_redirect_xsk_rqt(struct mlx5e_priv *priv, u16 ix, u32 rqn) -{ - struct mlx5e_redirect_rqt_param direct_rrp = { - .is_rss = false, - { - .rqn = rqn, - }, - }; - - u32 rqtn = priv->xsk_tir[ix].rqt.rqtn; - - return mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); -} - -int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c) -{ - return mlx5e_redirect_xsk_rqt(priv, c->ix, c->xskrq.rqn); -} - -int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix) -{ - return mlx5e_redirect_xsk_rqt(priv, ix, priv->drop_rq.rqn); -} - -int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs) -{ - int err, i; - - if (!priv->xsk.refcnt) - return 0; - - for (i = 0; i < chs->num; i++) { - struct mlx5e_channel *c = chs->c[i]; - - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) - continue; - - err = mlx5e_xsk_redirect_rqt_to_channel(priv, c); - if (unlikely(err)) - goto err_stop; - } - - return 0; - -err_stop: - for (i--; i >= 0; i--) { - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state)) - continue; - - mlx5e_xsk_redirect_rqt_to_drop(priv, i); - } - - return err; -} - -void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs) -{ - int i; - - if (!priv->xsk.refcnt) - return; - - for (i = 0; i < chs->num; i++) { - if (!test_bit(MLX5E_CHANNEL_STATE_XSK, chs->c[i]->state)) - continue; - - mlx5e_xsk_redirect_rqt_to_drop(priv, i); - } -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h index ca20f1ff5e39..50e111b85efd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.h @@ -17,9 +17,5 @@ int mlx5e_open_xsk(struct mlx5e_priv *priv, struct mlx5e_params *params, void 
mlx5e_close_xsk(struct mlx5e_channel *c); void mlx5e_activate_xsk(struct mlx5e_channel *c); void mlx5e_deactivate_xsk(struct mlx5e_channel *c); -int mlx5e_xsk_redirect_rqt_to_channel(struct mlx5e_priv *priv, struct mlx5e_channel *c); -int mlx5e_xsk_redirect_rqt_to_drop(struct mlx5e_priv *priv, u16 ix); -int mlx5e_xsk_redirect_rqts_to_channels(struct mlx5e_priv *priv, struct mlx5e_channels *chs); -void mlx5e_xsk_redirect_rqts_to_drop(struct mlx5e_priv *priv, struct mlx5e_channels *chs); #endif /* __MLX5_EN_XSK_SETUP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c index e51f60b55daa..4c4ee524176c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/fs_tcp.c @@ -16,13 +16,13 @@ struct mlx5e_accel_fs_tcp { struct mlx5_flow_handle *default_rules[ACCEL_FS_TCP_NUM_TYPES]; }; -static enum mlx5e_traffic_types fs_accel2tt(enum accel_fs_tcp_type i) +static enum mlx5_traffic_types fs_accel2tt(enum accel_fs_tcp_type i) { switch (i) { case ACCEL_FS_IPV4_TCP: - return MLX5E_TT_IPV4_TCP; + return MLX5_TT_IPV4_TCP; default: /* ACCEL_FS_IPV6_TCP */ - return MLX5E_TT_IPV6_TCP; + return MLX5_TT_IPV6_TCP; } } @@ -161,7 +161,7 @@ static int accel_fs_tcp_add_default_rule(struct mlx5e_priv *priv, fs_tcp = priv->fs.accel_tcp; accel_fs_t = &fs_tcp->tables[type]; - dest = mlx5e_ttc_get_default_dest(priv, fs_accel2tt(type)); + dest = mlx5_ttc_get_default_dest(priv->fs.ttc, fs_accel2tt(type)); rule = mlx5_add_flow_rules(accel_fs_t->t, NULL, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -307,7 +307,7 @@ static int accel_fs_tcp_disable(struct mlx5e_priv *priv) for (i = 0; i < ACCEL_FS_TCP_NUM_TYPES; i++) { /* Modify ttc rules destination to point back to the indir TIRs */ - err = mlx5e_ttc_fwd_default_dest(priv, fs_accel2tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_accel2tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -329,7 +329,7 @@ static int accel_fs_tcp_enable(struct mlx5e_priv *priv) dest.ft = priv->fs.accel_tcp->tables[i].t; /* Modify ttc rules destination to point on the accel_fs FTs */ - err = mlx5e_ttc_fwd_dest(priv, fs_accel2tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, fs_accel2tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] destination to accel failed, err(%d)\n", diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c index 34119ce92031..17da23dff0ed 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_fs.c @@ -41,11 +41,11 @@ struct mlx5e_ipsec_tx { }; /* IPsec RX flow steering */ -static enum mlx5e_traffic_types fs_esp2tt(enum accel_fs_esp_type i) +static enum mlx5_traffic_types fs_esp2tt(enum accel_fs_esp_type i) { if (i == ACCEL_FS_ESP4) - return MLX5E_TT_IPV4_IPSEC_ESP; - return MLX5E_TT_IPV6_IPSEC_ESP; + return MLX5_TT_IPV4_IPSEC_ESP; + return MLX5_TT_IPV6_IPSEC_ESP; } static int rx_err_add_rule(struct mlx5e_priv *priv, @@ -265,7 +265,8 @@ static int rx_create(struct mlx5e_priv *priv, enum accel_fs_esp_type type) accel_esp = priv->ipsec->rx_fs; fs_prot = &accel_esp->fs_prot[type]; - fs_prot->default_dest = mlx5e_ttc_get_default_dest(priv, fs_esp2tt(type)); + fs_prot->default_dest = + mlx5_ttc_get_default_dest(priv->fs.ttc, fs_esp2tt(type)); err = 
rx_err_create_ft(priv, fs_prot, &fs_prot->rx_err); if (err) @@ -301,7 +302,7 @@ static int rx_ft_get(struct mlx5e_priv *priv, enum accel_fs_esp_type type) /* connect */ dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest.ft = fs_prot->ft; - mlx5e_ttc_fwd_dest(priv, fs_esp2tt(type), &dest); + mlx5_ttc_fwd_dest(priv->fs.ttc, fs_esp2tt(type), &dest); out: mutex_unlock(&fs_prot->prot_mutex); @@ -320,7 +321,7 @@ static void rx_ft_put(struct mlx5e_priv *priv, enum accel_fs_esp_type type) goto out; /* disconnect */ - mlx5e_ttc_fwd_default_dest(priv, fs_esp2tt(type)); + mlx5_ttc_fwd_default_dest(priv->fs.ttc, fs_esp2tt(type)); /* remove FT */ rx_destroy(priv, type); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 4e58fade7a60..62abce008c7b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -49,7 +49,7 @@ struct mlx5e_ktls_offload_context_rx { struct mlx5e_rq_stats *rq_stats; struct mlx5e_tls_sw_stats *sw_stats; struct completion add_ctx; - u32 tirn; + struct mlx5e_tir tir; u32 key_id; u32 rxq; DECLARE_BITMAP(flags, MLX5E_NUM_PRIV_RX_FLAGS); @@ -99,31 +99,22 @@ mlx5e_ktls_rx_resync_create_resp_list(void) return resp_list; } -static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, u32 *tirn, u32 rqtn) +static int mlx5e_ktls_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 rqtn) { - int err, inlen; - void *tirc; - u32 *in; + struct mlx5e_tir_builder *builder; + int err; - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + builder = mlx5e_tir_builder_alloc(false); + if (!builder) return -ENOMEM; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - - MLX5_SET(tirc, tirc, transport_domain, mdev->mlx5e_res.hw_objs.td.tdn); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); - MLX5_SET(tirc, tirc, indirect_table, rqtn); - MLX5_SET(tirc, tirc, tls_en, 1); - MLX5_SET(tirc, tirc, self_lb_block, - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_UNICAST | - MLX5_TIRC_SELF_LB_BLOCK_BLOCK_MULTICAST); + mlx5e_tir_builder_build_rqt(builder, mdev->mlx5e_res.hw_objs.td.tdn, rqtn, false); + mlx5e_tir_builder_build_direct(builder); + mlx5e_tir_builder_build_tls(builder); + err = mlx5e_tir_init(tir, builder, mdev, false); - err = mlx5_core_create_tir(mdev, in, tirn); + mlx5e_tir_builder_free(builder); - kvfree(in); return err; } @@ -139,7 +130,8 @@ static void accel_rule_handle_work(struct work_struct *work) goto out; rule = mlx5e_accel_fs_add_sk(accel_rule->priv, priv_rx->sk, - priv_rx->tirn, MLX5_FS_DEFAULT_FLOW_TAG); + mlx5e_tir_get_tirn(&priv_rx->tir), + MLX5_FS_DEFAULT_FLOW_TAG); if (!IS_ERR_OR_NULL(rule)) accel_rule->rule = rule; out: @@ -173,8 +165,8 @@ post_static_params(struct mlx5e_icosq *sq, pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_STATIC_PARAMS_WQE(sq, pi); mlx5e_ktls_build_static_params(wqe, sq->pc, sq->sqn, &priv_rx->crypto_info, - priv_rx->tirn, priv_rx->key_id, - priv_rx->resync.seq, false, + mlx5e_tir_get_tirn(&priv_rx->tir), + priv_rx->key_id, priv_rx->resync.seq, false, TLS_OFFLOAD_CTX_DIR_RX); wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_UMR_TLS, @@ -202,8 +194,9 @@ post_progress_params(struct mlx5e_icosq *sq, pi = mlx5e_icosq_get_next_pi(sq, num_wqebbs); wqe = MLX5E_TLS_FETCH_SET_PROGRESS_PARAMS_WQE(sq, pi); - mlx5e_ktls_build_progress_params(wqe, 
sq->pc, sq->sqn, priv_rx->tirn, false, - next_record_tcp_sn, + mlx5e_ktls_build_progress_params(wqe, sq->pc, sq->sqn, + mlx5e_tir_get_tirn(&priv_rx->tir), + false, next_record_tcp_sn, TLS_OFFLOAD_CTX_DIR_RX); wi = (struct mlx5e_icosq_wqe_info) { .wqe_type = MLX5E_ICOSQ_WQE_SET_PSV_TLS, @@ -325,7 +318,7 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, psv = &wqe->psv; psv->num_psv = 1 << 4; psv->l_key = sq->channel->mkey_be; - psv->psv_index[0] = cpu_to_be32(priv_rx->tirn); + psv->psv_index[0] = cpu_to_be32(mlx5e_tir_get_tirn(&priv_rx->tir)); psv->va = cpu_to_be64(buf->dma_addr); wi = (struct mlx5e_icosq_wqe_info) { @@ -635,9 +628,9 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, priv_rx->sw_stats = &priv->tls->sw_stats; mlx5e_set_ktls_rx_priv_ctx(tls_ctx, priv_rx); - rqtn = priv->direct_tir[rxq].rqt.rqtn; + rqtn = mlx5e_rx_res_get_rqtn_direct(priv->rx_res, rxq); - err = mlx5e_ktls_create_tir(mdev, &priv_rx->tirn, rqtn); + err = mlx5e_ktls_create_tir(mdev, &priv_rx->tir, rqtn); if (err) goto err_create_tir; @@ -658,7 +651,7 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, return 0; err_post_wqes: - mlx5_core_destroy_tir(mdev, priv_rx->tirn); + mlx5e_tir_destroy(&priv_rx->tir); err_create_tir: mlx5_ktls_destroy_key(mdev, priv_rx->key_id); err_create_key: @@ -693,7 +686,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) if (priv_rx->rule.rule) mlx5e_accel_fs_del_sk(priv_rx->rule.rule); - mlx5_core_destroy_tir(mdev, priv_rx->tirn); + mlx5e_tir_destroy(&priv_rx->tir); mlx5_ktls_destroy_key(mdev, priv_rx->key_id); /* priv_rx should normally be freed here, but if there is an outstanding * GET_PSV, deallocation will be delayed until the CQE for GET_PSV is diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c index 25403af32859..fe5d82fa6e92 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c @@ -98,17 +98,17 @@ struct arfs_rule { for (j = 0; j < ARFS_HASH_SIZE; j++) \ hlist_for_each_entry_safe(hn, tmp, &hash[j], hlist) -static enum mlx5e_traffic_types arfs_get_tt(enum arfs_type type) +static enum mlx5_traffic_types arfs_get_tt(enum arfs_type type) { switch (type) { case ARFS_IPV4_TCP: - return MLX5E_TT_IPV4_TCP; + return MLX5_TT_IPV4_TCP; case ARFS_IPV4_UDP: - return MLX5E_TT_IPV4_UDP; + return MLX5_TT_IPV4_UDP; case ARFS_IPV6_TCP: - return MLX5E_TT_IPV6_TCP; + return MLX5_TT_IPV6_TCP; case ARFS_IPV6_UDP: - return MLX5E_TT_IPV6_UDP; + return MLX5_TT_IPV6_UDP; default: return -EINVAL; } @@ -120,7 +120,7 @@ static int arfs_disable(struct mlx5e_priv *priv) for (i = 0; i < ARFS_NUM_TYPES; i++) { /* Modify ttc rules destination back to their default */ - err = mlx5e_ttc_fwd_default_dest(priv, arfs_get_tt(i)); + err = mlx5_ttc_fwd_default_dest(priv->fs.ttc, arfs_get_tt(i)); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] default destination failed, err(%d)\n", @@ -149,7 +149,7 @@ int mlx5e_arfs_enable(struct mlx5e_priv *priv) for (i = 0; i < ARFS_NUM_TYPES; i++) { dest.ft = priv->fs.arfs->arfs_tables[i].ft.t; /* Modify ttc rules destination to point on the aRFS FTs */ - err = mlx5e_ttc_fwd_dest(priv, arfs_get_tt(i), &dest); + err = mlx5_ttc_fwd_dest(priv->fs.ttc, arfs_get_tt(i), &dest); if (err) { netdev_err(priv->netdev, "%s: modify ttc[%d] dest to arfs, failed err(%d)\n", @@ -192,10 +192,9 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, enum arfs_type type) { struct 
arfs_table *arfs_t = &priv->fs.arfs->arfs_tables[type]; - struct mlx5e_tir *tir = priv->indir_tir; struct mlx5_flow_destination dest = {}; MLX5_DECLARE_FLOW_ACT(flow_act); - enum mlx5e_traffic_types tt; + enum mlx5_traffic_types tt; int err = 0; dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; @@ -206,10 +205,10 @@ static int arfs_add_default_rule(struct mlx5e_priv *priv, return -EINVAL; } - /* FIXME: Must use mlx5e_ttc_get_default_dest(), + /* FIXME: Must use mlx5_ttc_get_default_dest(), * but can't since TTC default is not setup yet ! */ - dest.tir_num = tir[tt].tirn; + dest.tir_num = mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt); arfs_t->default_rule = mlx5_add_flow_rules(arfs_t->ft.t, NULL, &flow_act, &dest, 1); @@ -553,7 +552,7 @@ static struct mlx5_flow_handle *arfs_add_rule(struct mlx5e_priv *priv, 16); } dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dest.tir_num = priv->direct_tir[arfs_rule->rxq].tirn; + dest.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, arfs_rule->rxq); rule = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -576,7 +575,7 @@ static void arfs_modify_rule_rq(struct mlx5e_priv *priv, int err = 0; dst.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dst.tir_num = priv->direct_tir[rxq].tirn; + dst.tir_num = mlx5e_rx_res_get_tirn_direct(priv->rx_res, rxq); err = mlx5_modify_rule_destination(rule, &dst, NULL); if (err) netdev_warn(priv->netdev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c index 8c166ee56d8b..84eb7201c142 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_common.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_common.c @@ -33,36 +33,9 @@ #include "en.h" /* mlx5e global resources should be placed in this file. - * Global resources are common to all the netdevices crated on the same nic. + * Global resources are common to all the netdevices created on the same nic. */ -int mlx5e_create_tir(struct mlx5_core_dev *mdev, struct mlx5e_tir *tir, u32 *in) -{ - struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; - int err; - - err = mlx5_core_create_tir(mdev, in, &tir->tirn); - if (err) - return err; - - mutex_lock(&res->td.list_lock); - list_add(&tir->list, &res->td.tirs_list); - mutex_unlock(&res->td.list_lock); - - return 0; -} - -void mlx5e_destroy_tir(struct mlx5_core_dev *mdev, - struct mlx5e_tir *tir) -{ - struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs; - - mutex_lock(&res->td.list_lock); - mlx5_core_destroy_tir(mdev, tir->tirn); - list_del(&tir->list); - mutex_unlock(&res->td.list_lock); -} - void mlx5e_mkey_set_relaxed_ordering(struct mlx5_core_dev *mdev, void *mkc) { bool ro_pci_enable = pcie_relaxed_ordering_enabled(mdev->pdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index bd72572e03d1..2cfd12953909 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -420,6 +420,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, unsigned int count = ch->combined_count; struct mlx5e_params new_params; bool arfs_enabled; + int rss_cnt; bool opened; int err = 0; @@ -455,6 +456,27 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, goto out; } + /* Don't allow changing the number of channels if non-default RSS contexts exist, + * the kernel doesn't protect against set_channels operations that break them. 
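+	 * Note (editorial, inferred from the code below): mlx5e_rx_res_rss_cnt()
+	 * counts the default RSS context as well, so the "- 1" that follows yields
+	 * only the user-created (non-default) contexts reported in the error message.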
+ */ + rss_cnt = mlx5e_rx_res_rss_cnt(priv->rx_res) - 1; + if (rss_cnt) { + err = -EINVAL; + netdev_err(priv->netdev, "%s: Non-default RSS contexts exist (%d), cannot change the number of channels\n", + __func__, rss_cnt); + goto out; + } + + /* Don't allow changing the number of channels if MQPRIO mode channel offload is active, + * because it defines a partition over the channels queues. + */ + if (cur_params->mqprio.mode == TC_MQPRIO_MODE_CHANNEL) { + err = -EINVAL; + netdev_err(priv->netdev, "%s: MQPRIO mode channel offload is active, cannot change the number of channels\n", + __func__); + goto out; + } + new_params = *cur_params; new_params.num_channels = count; @@ -512,7 +534,9 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, } static int mlx5e_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -630,7 +654,9 @@ int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, } static int mlx5e_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -1172,7 +1198,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev, u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv) { - return sizeof(priv->rss_params.toeplitz_hash_key); + return sizeof_field(struct mlx5e_rss_params_hash, toeplitz_hash_key); } static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) @@ -1194,88 +1220,64 @@ static u32 mlx5e_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, - u8 *hfunc) +static int mlx5e_get_rxfh_context(struct net_device *dev, u32 *indir, + u8 *key, u8 *hfunc, u32 rss_context) { - struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rss_params *rss = &priv->rss_params; - - if (indir) - memcpy(indir, rss->indirection_rqt, - sizeof(rss->indirection_rqt)); - - if (key) - memcpy(key, rss->toeplitz_hash_key, - sizeof(rss->toeplitz_hash_key)); - - if (hfunc) - *hfunc = rss->hfunc; + struct mlx5e_priv *priv = netdev_priv(dev); + int err; - return 0; + mutex_lock(&priv->state_lock); + err = mlx5e_rx_res_rss_get_rxfh(priv->rx_res, rss_context, indir, key, hfunc); + mutex_unlock(&priv->state_lock); + return err; } -int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, - const u8 *key, const u8 hfunc) +static int mlx5e_set_rxfh_context(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc, + u32 *rss_context, bool delete) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_rss_params *rss = &priv->rss_params; - int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - bool refresh_tirs = false; - bool refresh_rqt = false; - void *in; - - if ((hfunc != ETH_RSS_HASH_NO_CHANGE) && - (hfunc != ETH_RSS_HASH_XOR) && - (hfunc != ETH_RSS_HASH_TOP)) - return -EINVAL; - - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; + int err; mutex_lock(&priv->state_lock); - - if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { - rss->hfunc = hfunc; - refresh_rqt = true; - refresh_tirs = true; - } - - if (indir) { - memcpy(rss->indirection_rqt, indir, - sizeof(rss->indirection_rqt)); - refresh_rqt = true; + if (delete) { + err = mlx5e_rx_res_rss_destroy(priv->rx_res, 
*rss_context); + goto unlock; } - if (key) { - memcpy(rss->toeplitz_hash_key, key, - sizeof(rss->toeplitz_hash_key)); - refresh_tirs = refresh_tirs || rss->hfunc == ETH_RSS_HASH_TOP; - } + if (*rss_context == ETH_RXFH_CONTEXT_ALLOC) { + unsigned int count = priv->channels.params.num_channels; - if (refresh_rqt && test_bit(MLX5E_STATE_OPENED, &priv->state)) { - struct mlx5e_redirect_rqt_param rrp = { - .is_rss = true, - { - .rss = { - .hfunc = rss->hfunc, - .channels = &priv->channels, - }, - }, - }; - u32 rqtn = priv->indir_rqt.rqtn; - - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); + err = mlx5e_rx_res_rss_init(priv->rx_res, rss_context, count); + if (err) + goto unlock; } - if (refresh_tirs) - mlx5e_modify_tirs_hash(priv, in); + err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, *rss_context, indir, key, + hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc); +unlock: mutex_unlock(&priv->state_lock); + return err; +} - kvfree(in); +int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, + u8 *hfunc) +{ + return mlx5e_get_rxfh_context(netdev, indir, key, hfunc, 0); +} - return 0; +int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct mlx5e_priv *priv = netdev_priv(dev); + int err; + + mutex_lock(&priv->state_lock); + err = mlx5e_rx_res_rss_set_rxfh(priv->rx_res, 0, indir, key, + hfunc == ETH_RSS_HASH_NO_CHANGE ? NULL : &hfunc); + mutex_unlock(&priv->state_lock); + return err; } #define MLX5E_PFC_PREVEN_AUTO_TOUT_MSEC 100 @@ -2358,6 +2360,8 @@ const struct ethtool_ops mlx5e_ethtool_ops = { .get_rxfh_indir_size = mlx5e_get_rxfh_indir_size, .get_rxfh = mlx5e_get_rxfh, .set_rxfh = mlx5e_set_rxfh, + .get_rxfh_context = mlx5e_get_rxfh_context, + .set_rxfh_context = mlx5e_set_rxfh_context, .get_rxnfc = mlx5e_get_rxnfc, .set_rxnfc = mlx5e_set_rxnfc, .get_tunable = mlx5e_get_tunable, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c index 0b75fab41ae8..c06b4b938ae7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c @@ -718,7 +718,7 @@ static int mlx5e_add_promisc_rule(struct mlx5e_priv *priv) if (!spec) return -ENOMEM; dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.ttc.ft.t; + dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc); rule_p = &priv->fs.promisc.rule; *rule_p = mlx5_add_flow_rules(ft, spec, &flow_act, &dest, 1); @@ -854,593 +854,59 @@ void mlx5e_destroy_flow_table(struct mlx5e_flow_table *ft) ft->t = NULL; } -static void mlx5e_cleanup_ttc_rules(struct mlx5e_ttc_table *ttc) -{ - int i; - - for (i = 0; i < MLX5E_NUM_TT; i++) { - if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) { - mlx5_del_flow_rules(ttc->rules[i].rule); - ttc->rules[i].rule = NULL; - } - } - - for (i = 0; i < MLX5E_NUM_TUNNEL_TT; i++) { - if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { - mlx5_del_flow_rules(ttc->tunnel_rules[i]); - ttc->tunnel_rules[i] = NULL; - } - } -} - -struct mlx5e_etype_proto { - u16 etype; - u8 proto; -}; - -static struct mlx5e_etype_proto ttc_rules[] = { - [MLX5E_TT_IPV4_TCP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_TCP, - }, - [MLX5E_TT_IPV6_TCP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_TCP, - }, - [MLX5E_TT_IPV4_UDP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_UDP, - }, - [MLX5E_TT_IPV6_UDP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_UDP, - }, - [MLX5E_TT_IPV4_IPSEC_AH] = { - .etype = ETH_P_IP, - .proto = IPPROTO_AH, - }, - [MLX5E_TT_IPV6_IPSEC_AH] = { - .etype = ETH_P_IPV6, - .proto = 
IPPROTO_AH, - }, - [MLX5E_TT_IPV4_IPSEC_ESP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_ESP, - }, - [MLX5E_TT_IPV6_IPSEC_ESP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_ESP, - }, - [MLX5E_TT_IPV4] = { - .etype = ETH_P_IP, - .proto = 0, - }, - [MLX5E_TT_IPV6] = { - .etype = ETH_P_IPV6, - .proto = 0, - }, - [MLX5E_TT_ANY] = { - .etype = 0, - .proto = 0, - }, -}; - -static struct mlx5e_etype_proto ttc_tunnel_rules[] = { - [MLX5E_TT_IPV4_GRE] = { - .etype = ETH_P_IP, - .proto = IPPROTO_GRE, - }, - [MLX5E_TT_IPV6_GRE] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_GRE, - }, - [MLX5E_TT_IPV4_IPIP] = { - .etype = ETH_P_IP, - .proto = IPPROTO_IPIP, - }, - [MLX5E_TT_IPV6_IPIP] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_IPIP, - }, - [MLX5E_TT_IPV4_IPV6] = { - .etype = ETH_P_IP, - .proto = IPPROTO_IPV6, - }, - [MLX5E_TT_IPV6_IPV6] = { - .etype = ETH_P_IPV6, - .proto = IPPROTO_IPV6, - }, - -}; - -u8 mlx5e_get_proto_by_tunnel_type(enum mlx5e_tunnel_types tt) -{ - return ttc_tunnel_rules[tt].proto; -} - -static bool mlx5e_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, u8 proto_type) -{ - switch (proto_type) { - case IPPROTO_GRE: - return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); - case IPPROTO_IPIP: - case IPPROTO_IPV6: - return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || - MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); - default: - return false; - } -} - -static bool mlx5e_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) -{ - int tt; - - for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (mlx5e_tunnel_proto_supported_rx(mdev, ttc_tunnel_rules[tt].proto)) - return true; - } - return false; -} - -bool mlx5e_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) -{ - return (mlx5e_tunnel_any_rx_proto_supported(mdev) && - MLX5_CAP_FLOWTABLE_NIC_RX(mdev, ft_field_support.inner_ip_version)); -} - -static u8 mlx5e_etype_to_ipv(u16 ethertype) -{ - if (ethertype == ETH_P_IP) - return 4; - - if (ethertype == ETH_P_IPV6) - return 6; - - return 0; -} - -static struct mlx5_flow_handle * -mlx5e_generate_ttc_rule(struct mlx5e_priv *priv, - struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, - u16 etype, - u8 proto) -{ - int match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); - MLX5_DECLARE_FLOW_ACT(flow_act); - struct mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - int err = 0; - u8 ipv; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return ERR_PTR(-ENOMEM); - - if (proto) { - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); - } - - ipv = mlx5e_etype_to_ipv(etype); - if (match_ipv_outer && ipv) { - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); - } else if (etype) { - spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); - MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); - } - - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule failed\n", __func__); - } - - kvfree(spec); - return err ? 
ERR_PTR(err) : rule; -} - -static int mlx5e_generate_ttc_table_rules(struct mlx5e_priv *priv, - struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - struct mlx5_flow_destination dest = {}; - struct mlx5_flow_handle **trules; - struct mlx5e_ttc_rule *rules; - struct mlx5_flow_table *ft; - int tt; - int err; - - ft = ttc->ft.t; - rules = ttc->rules; - - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - for (tt = 0; tt < MLX5E_NUM_TT; tt++) { - struct mlx5e_ttc_rule *rule = &rules[tt]; - - if (tt == MLX5E_TT_ANY) - dest.tir_num = params->any_tt_tirn; - else - dest.tir_num = params->indir_tirn[tt]; - - rule->rule = mlx5e_generate_ttc_rule(priv, ft, &dest, - ttc_rules[tt].etype, - ttc_rules[tt].proto); - if (IS_ERR(rule->rule)) { - err = PTR_ERR(rule->rule); - rule->rule = NULL; - goto del_rules; - } - rule->default_dest = dest; - } - - if (!params->inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) - return 0; - - trules = ttc->tunnel_rules; - dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = params->inner_ttc->ft.t; - for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (!mlx5e_tunnel_proto_supported_rx(priv->mdev, - ttc_tunnel_rules[tt].proto)) - continue; - trules[tt] = mlx5e_generate_ttc_rule(priv, ft, &dest, - ttc_tunnel_rules[tt].etype, - ttc_tunnel_rules[tt].proto); - if (IS_ERR(trules[tt])) { - err = PTR_ERR(trules[tt]); - trules[tt] = NULL; - goto del_rules; - } - } - - return 0; - -del_rules: - mlx5e_cleanup_ttc_rules(ttc); - return err; -} - -static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc, - bool use_ipv) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_flow_table *ft = &ttc->ft; - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(MLX5E_TTC_NUM_GROUPS, - sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - kfree(ft->g); - ft->g = NULL; - return -ENOMEM; - } - - /* L4 Group */ - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); - if (use_ipv) - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); - else - MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_TTC_GROUP1_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* L3 Group */ - MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_TTC_GROUP2_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Any Group */ - memset(in, 0, inlen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_TTC_GROUP3_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; -} - -static struct mlx5_flow_handle * -mlx5e_generate_inner_ttc_rule(struct mlx5e_priv *priv, - struct mlx5_flow_table *ft, - struct mlx5_flow_destination *dest, - u16 etype, u8 proto) -{ - MLX5_DECLARE_FLOW_ACT(flow_act); - struct 
mlx5_flow_handle *rule; - struct mlx5_flow_spec *spec; - int err = 0; - u8 ipv; - - spec = kvzalloc(sizeof(*spec), GFP_KERNEL); - if (!spec) - return ERR_PTR(-ENOMEM); - - ipv = mlx5e_etype_to_ipv(etype); - if (etype && ipv) { - spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); - MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); - } - - if (proto) { - spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; - MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); - MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); - } - - rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); - if (IS_ERR(rule)) { - err = PTR_ERR(rule); - netdev_err(priv->netdev, "%s: add rule failed\n", __func__); - } - - kvfree(spec); - return err ? ERR_PTR(err) : rule; -} - -static int mlx5e_generate_inner_ttc_table_rules(struct mlx5e_priv *priv, - struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - struct mlx5_flow_destination dest = {}; - struct mlx5e_ttc_rule *rules; - struct mlx5_flow_table *ft; - int err; - int tt; - - ft = ttc->ft.t; - rules = ttc->rules; - dest.type = MLX5_FLOW_DESTINATION_TYPE_TIR; - - for (tt = 0; tt < MLX5E_NUM_TT; tt++) { - struct mlx5e_ttc_rule *rule = &rules[tt]; - - if (tt == MLX5E_TT_ANY) - dest.tir_num = params->any_tt_tirn; - else - dest.tir_num = params->indir_tirn[tt]; - - rule->rule = mlx5e_generate_inner_ttc_rule(priv, ft, &dest, - ttc_rules[tt].etype, - ttc_rules[tt].proto); - if (IS_ERR(rule->rule)) { - err = PTR_ERR(rule->rule); - rule->rule = NULL; - goto del_rules; - } - rule->default_dest = dest; - } - - return 0; - -del_rules: - - mlx5e_cleanup_ttc_rules(ttc); - return err; -} - -static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc) -{ - int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); - struct mlx5e_flow_table *ft = &ttc->ft; - int ix = 0; - u32 *in; - int err; - u8 *mc; - - ft->g = kcalloc(MLX5E_INNER_TTC_NUM_GROUPS, sizeof(*ft->g), GFP_KERNEL); - if (!ft->g) - return -ENOMEM; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) { - kfree(ft->g); - ft->g = NULL; - return -ENOMEM; - } - - /* L4 Group */ - mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); - MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); - MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); - MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_INNER_TTC_GROUP1_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* L3 Group */ - MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_INNER_TTC_GROUP2_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - /* Any Group */ - memset(in, 0, inlen); - MLX5_SET_CFG(in, start_flow_index, ix); - ix += MLX5E_INNER_TTC_GROUP3_SIZE; - MLX5_SET_CFG(in, end_flow_index, ix - 1); - ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in); - if (IS_ERR(ft->g[ft->num_groups])) - goto err; - ft->num_groups++; - - kvfree(in); - return 0; - -err: - err = PTR_ERR(ft->g[ft->num_groups]); - ft->g[ft->num_groups] = NULL; - kvfree(in); - - return err; 
-} - -void mlx5e_set_ttc_basic_params(struct mlx5e_priv *priv, - struct ttc_params *ttc_params) -{ - ttc_params->any_tt_tirn = priv->direct_tir[0].tirn; - ttc_params->inner_ttc = &priv->fs.inner_ttc; -} - -void mlx5e_set_inner_ttc_ft_params(struct ttc_params *ttc_params) +static void mlx5e_set_inner_ttc_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params) { struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + int tt; - ft_attr->max_fte = MLX5E_INNER_TTC_TABLE_SIZE; + memset(ttc_params, 0, sizeof(*ttc_params)); + ttc_params->ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); ft_attr->level = MLX5E_INNER_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; + + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + ttc_params->dests[tt].tir_num = + tt == MLX5_TT_ANY ? + mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) : + mlx5e_rx_res_get_tirn_rss_inner(priv->rx_res, + tt); + } } -void mlx5e_set_ttc_ft_params(struct ttc_params *ttc_params) +void mlx5e_set_ttc_params(struct mlx5e_priv *priv, + struct ttc_params *ttc_params, bool tunnel) { struct mlx5_flow_table_attr *ft_attr = &ttc_params->ft_attr; + int tt; - ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; + memset(ttc_params, 0, sizeof(*ttc_params)); + ttc_params->ns = mlx5_get_flow_namespace(priv->mdev, + MLX5_FLOW_NAMESPACE_KERNEL); ft_attr->level = MLX5E_TTC_FT_LEVEL; ft_attr->prio = MLX5E_NIC_PRIO; -} - -int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - struct mlx5e_flow_table *ft = &ttc->ft; - int err; - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) - return 0; - - ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); - if (IS_ERR(ft->t)) { - err = PTR_ERR(ft->t); - ft->t = NULL; - return err; + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + ttc_params->dests[tt].tir_num = + tt == MLX5_TT_ANY ? 
+ mlx5e_rx_res_get_tirn_direct(priv->rx_res, 0) : + mlx5e_rx_res_get_tirn_rss(priv->rx_res, tt); } - err = mlx5e_create_inner_ttc_table_groups(ttc); - if (err) - goto err; - - err = mlx5e_generate_inner_ttc_table_rules(priv, params, ttc); - if (err) - goto err; - - return 0; - -err: - mlx5e_destroy_flow_table(ft); - return err; -} - -void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc) -{ - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + ttc_params->inner_ttc = tunnel; + if (!tunnel || !mlx5_tunnel_inner_ft_supported(priv->mdev)) return; - mlx5e_cleanup_ttc_rules(ttc); - mlx5e_destroy_flow_table(&ttc->ft); -} - -void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv, - struct mlx5e_ttc_table *ttc) -{ - mlx5e_cleanup_ttc_rules(ttc); - mlx5e_destroy_flow_table(&ttc->ft); -} - -int mlx5e_create_ttc_table(struct mlx5e_priv *priv, struct ttc_params *params, - struct mlx5e_ttc_table *ttc) -{ - bool match_ipv_outer = MLX5_CAP_FLOWTABLE_NIC_RX(priv->mdev, ft_field_support.outer_ip_version); - struct mlx5e_flow_table *ft = &ttc->ft; - int err; - - ft->t = mlx5_create_flow_table(priv->fs.ns, ¶ms->ft_attr); - if (IS_ERR(ft->t)) { - err = PTR_ERR(ft->t); - ft->t = NULL; - return err; + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + ttc_params->tunnel_dests[tt].type = + MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; + ttc_params->tunnel_dests[tt].ft = + mlx5_get_ttc_flow_table(priv->fs.inner_ttc); } - - err = mlx5e_create_ttc_table_groups(ttc, match_ipv_outer); - if (err) - goto err; - - err = mlx5e_generate_ttc_table_rules(priv, params, ttc); - if (err) - goto err; - - return 0; -err: - mlx5e_destroy_flow_table(ft); - return err; -} - -int mlx5e_ttc_fwd_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type, - struct mlx5_flow_destination *new_dest) -{ - return mlx5_modify_rule_destination(priv->fs.ttc.rules[type].rule, new_dest, NULL); -} - -struct mlx5_flow_destination -mlx5e_ttc_get_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) -{ - struct mlx5_flow_destination *dest = &priv->fs.ttc.rules[type].default_dest; - - WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR, - "TTC[%d] default dest is not setup yet", type); - - return *dest; -} - -int mlx5e_ttc_fwd_default_dest(struct mlx5e_priv *priv, enum mlx5e_traffic_types type) -{ - struct mlx5_flow_destination dest = mlx5e_ttc_get_default_dest(priv, type); - - return mlx5e_ttc_fwd_dest(priv, type, &dest); } static void mlx5e_del_l2_flow_rule(struct mlx5e_priv *priv, @@ -1473,7 +939,7 @@ static int mlx5e_add_l2_flow_rule(struct mlx5e_priv *priv, outer_headers.dmac_47_16); dest.type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; - dest.ft = priv->fs.ttc.ft.t; + dest.ft = mlx5_get_ttc_flow_table(priv->fs.ttc); switch (type) { case MLX5E_FULLMATCH: @@ -1769,10 +1235,47 @@ static void mlx5e_destroy_vlan_table(struct mlx5e_priv *priv) kvfree(priv->fs.vlan); } -int mlx5e_create_flow_steering(struct mlx5e_priv *priv) +static void mlx5e_destroy_inner_ttc_table(struct mlx5e_priv *priv) +{ + if (!mlx5_tunnel_inner_ft_supported(priv->mdev)) + return; + mlx5_destroy_ttc_table(priv->fs.inner_ttc); +} + +void mlx5e_destroy_ttc_table(struct mlx5e_priv *priv) +{ + mlx5_destroy_ttc_table(priv->fs.ttc); +} + +static int mlx5e_create_inner_ttc_table(struct mlx5e_priv *priv) { struct ttc_params ttc_params = {}; - int tt, err; + + if (!mlx5_tunnel_inner_ft_supported(priv->mdev)) + return 0; + + mlx5e_set_inner_ttc_params(priv, &ttc_params); + priv->fs.inner_ttc = mlx5_create_inner_ttc_table(priv->mdev, + &ttc_params); 
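+	/* Editorial note: this handle is later consumed by mlx5e_set_ttc_params(),
+	 * which points the outer TTC tunnel rules at the inner table, and by
+	 * mlx5e_destroy_inner_ttc_table() on teardown.
+	 */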
+ if (IS_ERR(priv->fs.inner_ttc)) + return PTR_ERR(priv->fs.inner_ttc); + return 0; +} + +int mlx5e_create_ttc_table(struct mlx5e_priv *priv) +{ + struct ttc_params ttc_params = {}; + + mlx5e_set_ttc_params(priv, &ttc_params, true); + priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(priv->fs.ttc)) + return PTR_ERR(priv->fs.ttc); + return 0; +} + +int mlx5e_create_flow_steering(struct mlx5e_priv *priv) +{ + int err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); @@ -1787,23 +1290,15 @@ int mlx5e_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - mlx5e_set_ttc_basic_params(priv, &ttc_params); - mlx5e_set_inner_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; - - err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); + err = mlx5e_create_inner_ttc_table(priv); if (err) { - netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", + netdev_err(priv->netdev, + "Failed to create inner ttc table, err=%d\n", err); goto err_destroy_arfs_tables; } - mlx5e_set_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; - - err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); @@ -1837,9 +1332,9 @@ err_destory_vlan_table: err_destroy_l2_table: mlx5e_destroy_l2_table(priv); err_destroy_ttc_table: - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); + mlx5e_destroy_ttc_table(priv); err_destroy_inner_ttc_table: - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); + mlx5e_destroy_inner_ttc_table(priv); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -1851,8 +1346,8 @@ void mlx5e_destroy_flow_steering(struct mlx5e_priv *priv) mlx5e_ptp_free_rx_fs(priv); mlx5e_destroy_vlan_table(priv); mlx5e_destroy_l2_table(priv); - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); + mlx5e_destroy_ttc_table(priv); + mlx5e_destroy_inner_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); mlx5e_ethtool_cleanup_steering(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index b416a8ee2eed..03693fa74a70 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -35,11 +35,19 @@ #include "en/params.h" #include "en/xsk/pool.h" +static int flow_type_to_traffic_type(u32 flow_type); + +static u32 flow_type_mask(u32 flow_type) +{ + return flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS); +} + struct mlx5e_ethtool_rule { struct list_head list; struct ethtool_rx_flow_spec flow_spec; struct mlx5_flow_handle *rule; struct mlx5e_ethtool_table *eth_ft; + struct mlx5e_rss *rss; }; static void put_flow_table(struct mlx5e_ethtool_table *eth_ft) @@ -66,7 +74,7 @@ static struct mlx5e_ethtool_table *get_flow_table(struct mlx5e_priv *priv, int table_size; int prio; - switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + switch (flow_type_mask(fs->flow_type)) { case TCP_V4_FLOW: case UDP_V4_FLOW: case TCP_V6_FLOW: @@ -329,7 +337,7 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v, outer_headers); void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v, outer_headers); - u32 flow_type = 
fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT); + u32 flow_type = flow_type_mask(fs->flow_type); switch (flow_type) { case TCP_V4_FLOW: @@ -397,10 +405,53 @@ static bool outer_header_zero(u32 *match_criteria) size - 1); } +static int flow_get_tirn(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *eth_rule, + struct ethtool_rx_flow_spec *fs, + u32 rss_context, u32 *tirn) +{ + if (fs->flow_type & FLOW_RSS) { + struct mlx5e_lro_param lro_param; + struct mlx5e_rss *rss; + u32 flow_type; + int err; + int tt; + + rss = mlx5e_rx_res_rss_get(priv->rx_res, rss_context); + if (!rss) + return -ENOENT; + + flow_type = flow_type_mask(fs->flow_type); + tt = flow_type_to_traffic_type(flow_type); + if (tt < 0) + return -EINVAL; + + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rss_obtain_tirn(rss, tt, &lro_param, false, tirn); + if (err) + return err; + eth_rule->rss = rss; + mlx5e_rss_refcnt_inc(eth_rule->rss); + } else { + struct mlx5e_params *params = &priv->channels.params; + enum mlx5e_rq_group group; + u16 ix; + + mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group); + + *tirn = group == MLX5E_RQ_GROUP_XSK ? + mlx5e_rx_res_get_tirn_xsk(priv->rx_res, ix) : + mlx5e_rx_res_get_tirn_direct(priv->rx_res, ix); + } + + return 0; +} + static struct mlx5_flow_handle * add_ethtool_flow_rule(struct mlx5e_priv *priv, + struct mlx5e_ethtool_rule *eth_rule, struct mlx5_flow_table *ft, - struct ethtool_rx_flow_spec *fs) + struct ethtool_rx_flow_spec *fs, u32 rss_context) { struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND }; struct mlx5_flow_destination *dst = NULL; @@ -419,22 +470,17 @@ add_ethtool_flow_rule(struct mlx5e_priv *priv, if (fs->ring_cookie == RX_CLS_FLOW_DISC) { flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; } else { - struct mlx5e_params *params = &priv->channels.params; - enum mlx5e_rq_group group; - struct mlx5e_tir *tir; - u16 ix; - - mlx5e_qid_get_ch_and_group(params, fs->ring_cookie, &ix, &group); - tir = group == MLX5E_RQ_GROUP_XSK ? 
priv->xsk_tir : priv->direct_tir; - dst = kzalloc(sizeof(*dst), GFP_KERNEL); if (!dst) { err = -ENOMEM; goto free; } + err = flow_get_tirn(priv, eth_rule, fs, rss_context, &dst->tir_num); + if (err) + goto free; + dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR; - dst->tir_num = tir[ix].tirn; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } @@ -458,6 +504,8 @@ static void del_ethtool_rule(struct mlx5e_priv *priv, { if (eth_rule->rule) mlx5_del_flow_rules(eth_rule->rule); + if (eth_rule->rss) + mlx5e_rss_refcnt_dec(eth_rule->rss); list_del(ð_rule->list); priv->fs.ethtool.tot_num_rules--; put_flow_table(eth_rule->eth_ft); @@ -618,7 +666,7 @@ static int validate_flow(struct mlx5e_priv *priv, fs->ring_cookie)) return -EINVAL; - switch (fs->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT)) { + switch (flow_type_mask(fs->flow_type)) { case ETHER_FLOW: num_tuples += validate_ethter(fs); break; @@ -667,7 +715,7 @@ static int validate_flow(struct mlx5e_priv *priv, static int mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, - struct ethtool_rx_flow_spec *fs) + struct ethtool_rx_flow_spec *fs, u32 rss_context) { struct mlx5e_ethtool_table *eth_ft; struct mlx5e_ethtool_rule *eth_rule; @@ -698,7 +746,7 @@ mlx5e_ethtool_flow_replace(struct mlx5e_priv *priv, err = -EINVAL; goto del_ethtool_rule; } - rule = add_ethtool_flow_rule(priv, eth_ft->ft, fs); + rule = add_ethtool_flow_rule(priv, eth_rule, eth_ft->ft, fs, rss_context); if (IS_ERR(rule)) { err = PTR_ERR(rule); goto del_ethtool_rule; @@ -744,10 +792,20 @@ mlx5e_ethtool_get_flow(struct mlx5e_priv *priv, return -EINVAL; list_for_each_entry(eth_rule, &priv->fs.ethtool.rules, list) { - if (eth_rule->flow_spec.location == location) { - info->fs = eth_rule->flow_spec; + int index; + + if (eth_rule->flow_spec.location != location) + continue; + if (!info) return 0; - } + info->fs = eth_rule->flow_spec; + if (!eth_rule->rss) + return 0; + index = mlx5e_rx_res_rss_index(priv->rx_res, eth_rule->rss); + if (index < 0) + return index; + info->rss_context = index; + return 0; } return -ENOENT; @@ -763,7 +821,7 @@ mlx5e_ethtool_get_all_flows(struct mlx5e_priv *priv, info->data = MAX_NUM_OF_ETHTOOL_RULES; while ((!err || err == -ENOENT) && idx < info->rule_cnt) { - err = mlx5e_ethtool_get_flow(priv, info, location); + err = mlx5e_ethtool_get_flow(priv, NULL, location); if (!err) rule_locs[idx++] = location; location++; @@ -785,45 +843,44 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) INIT_LIST_HEAD(&priv->fs.ethtool.rules); } -static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type) +static int flow_type_to_traffic_type(u32 flow_type) { switch (flow_type) { case TCP_V4_FLOW: - return MLX5E_TT_IPV4_TCP; + return MLX5_TT_IPV4_TCP; case TCP_V6_FLOW: - return MLX5E_TT_IPV6_TCP; + return MLX5_TT_IPV6_TCP; case UDP_V4_FLOW: - return MLX5E_TT_IPV4_UDP; + return MLX5_TT_IPV4_UDP; case UDP_V6_FLOW: - return MLX5E_TT_IPV6_UDP; + return MLX5_TT_IPV6_UDP; case AH_V4_FLOW: - return MLX5E_TT_IPV4_IPSEC_AH; + return MLX5_TT_IPV4_IPSEC_AH; case AH_V6_FLOW: - return MLX5E_TT_IPV6_IPSEC_AH; + return MLX5_TT_IPV6_IPSEC_AH; case ESP_V4_FLOW: - return MLX5E_TT_IPV4_IPSEC_ESP; + return MLX5_TT_IPV4_IPSEC_ESP; case ESP_V6_FLOW: - return MLX5E_TT_IPV6_IPSEC_ESP; + return MLX5_TT_IPV6_IPSEC_ESP; case IPV4_FLOW: - return MLX5E_TT_IPV4; + return MLX5_TT_IPV4; case IPV6_FLOW: - return MLX5E_TT_IPV6; + return MLX5_TT_IPV6; default: - return MLX5E_NUM_INDIR_TIRS; + return -EINVAL; } } static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv, struct ethtool_rxnfc *nfc) 
{ - int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - enum mlx5e_traffic_types tt; u8 rx_hash_field = 0; - void *in; + int err; + int tt; tt = flow_type_to_traffic_type(nfc->flow_type); - if (tt == MLX5E_NUM_INDIR_TIRS) - return -EINVAL; + if (tt < 0) + return tt; /* RSS does not support anything other than hashing to queues * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest @@ -848,35 +905,24 @@ static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv, if (nfc->data & RXH_L4_B_2_3) rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - mutex_lock(&priv->state_lock); - - if (rx_hash_field == priv->rss_params.rx_hash_fields[tt]) - goto out; - - priv->rss_params.rx_hash_fields[tt] = rx_hash_field; - mlx5e_modify_tirs_hash(priv, in); - -out: + err = mlx5e_rx_res_rss_set_hash_fields(priv->rx_res, tt, rx_hash_field); mutex_unlock(&priv->state_lock); - kvfree(in); - return 0; + + return err; } static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv, struct ethtool_rxnfc *nfc) { - enum mlx5e_traffic_types tt; u32 hash_field = 0; + int tt; tt = flow_type_to_traffic_type(nfc->flow_type); - if (tt == MLX5E_NUM_INDIR_TIRS) - return -EINVAL; + if (tt < 0) + return tt; - hash_field = priv->rss_params.rx_hash_fields[tt]; + hash_field = mlx5e_rx_res_rss_get_hash_fields(priv->rx_res, tt); nfc->data = 0; if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP) @@ -898,7 +944,7 @@ int mlx5e_ethtool_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) switch (cmd->cmd) { case ETHTOOL_SRXCLSRLINS: - err = mlx5e_ethtool_flow_replace(priv, &cmd->fs); + err = mlx5e_ethtool_flow_replace(priv, &cmd->fs, cmd->rss_context); break; case ETHTOOL_SRXCLSRLDEL: err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 24f919ef9b8e..47efd858964d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -1619,7 +1619,7 @@ static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param) (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas)); MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode); - MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, mdev->priv.uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); @@ -1711,7 +1711,7 @@ static int mlx5e_open_sqs(struct mlx5e_channel *c, { int err, tc; - for (tc = 0; tc < params->num_tc; tc++) { + for (tc = 0; tc < mlx5e_get_dcb_num_tc(params); tc++) { int txq_ix = c->ix + tc * params->num_channels; err = mlx5e_open_txqsq(c, c->priv->tisn[c->lag_port][tc], txq_ix, @@ -1992,7 +1992,7 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, c->pdev = mlx5_core_dma_dev(priv->mdev); c->netdev = priv->netdev; c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.hw_objs.mkey.key); - c->num_tc = params->num_tc; + c->num_tc = mlx5e_get_dcb_num_tc(params); c->xdp = !!params->xdp_prog; c->stats = &priv->channel_stats[ix].ch; c->aff_mask = irq_get_effective_affinity_mask(irq); @@ -2185,400 +2185,14 @@ void mlx5e_close_channels(struct mlx5e_channels *chs) chs->num = 0; } -static int -mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt) -{ - struct mlx5_core_dev *mdev = priv->mdev; - void *rqtc; - int inlen; - int err; - u32 *in; - int i; - - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = 
kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - - for (i = 0; i < sz; i++) - MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn); - - err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn); - if (!err) - rqt->enabled = true; - - kvfree(in); - return err; -} - -void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt) -{ - rqt->enabled = false; - mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn); -} - -int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv) -{ - struct mlx5e_rqt *rqt = &priv->indir_rqt; - int err; - - err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt); - if (err) - mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err); - return err; -} - -int mlx5e_create_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) -{ - int err; - int ix; - - for (ix = 0; ix < n; ix++) { - err = mlx5e_create_rqt(priv, 1 /*size */, &tirs[ix].rqt); - if (unlikely(err)) - goto err_destroy_rqts; - } - - return 0; - -err_destroy_rqts: - mlx5_core_warn(priv->mdev, "create rqts failed, %d\n", err); - for (ix--; ix >= 0; ix--) - mlx5e_destroy_rqt(priv, &tirs[ix].rqt); - - return err; -} - -void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) -{ - int i; - - for (i = 0; i < n; i++) - mlx5e_destroy_rqt(priv, &tirs[i].rqt); -} - -static int mlx5e_rx_hash_fn(int hfunc) -{ - return (hfunc == ETH_RSS_HASH_TOP) ? - MLX5_RX_HASH_FN_TOEPLITZ : - MLX5_RX_HASH_FN_INVERTED_XOR8; -} - -int mlx5e_bits_invert(unsigned long a, int size) -{ - int inv = 0; - int i; - - for (i = 0; i < size; i++) - inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i; - - return inv; -} - -static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz, - struct mlx5e_redirect_rqt_param rrp, void *rqtc) -{ - int i; - - for (i = 0; i < sz; i++) { - u32 rqn; - - if (rrp.is_rss) { - int ix = i; - - if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, ilog2(sz)); - - ix = priv->rss_params.indirection_rqt[ix]; - rqn = rrp.rss.channels->c[ix]->rq.rqn; - } else { - rqn = rrp.rqn; - } - MLX5_SET(rqtc, rqtc, rq_num[i], rqn); - } -} - -int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, - struct mlx5e_redirect_rqt_param rrp) -{ - struct mlx5_core_dev *mdev = priv->mdev; - void *rqtc; - int inlen; - u32 *in; - int err; - - inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1); - mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc); - err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen); - - kvfree(in); - return err; -} - -static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix, - struct mlx5e_redirect_rqt_param rrp) -{ - if (!rrp.is_rss) - return rrp.rqn; - - if (ix >= rrp.rss.channels->num) - return priv->drop_rq.rqn; - - return rrp.rss.channels->c[ix]->rq.rqn; -} - -static void mlx5e_redirect_rqts(struct mlx5e_priv *priv, - struct mlx5e_redirect_rqt_param rrp, - struct mlx5e_redirect_rqt_param *ptp_rrp) -{ - u32 rqtn; - int ix; - - if (priv->indir_rqt.enabled) { - /* RSS RQ table */ - rqtn = priv->indir_rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp); - } - - for (ix = 0; ix < priv->max_nch; ix++) { - struct mlx5e_redirect_rqt_param direct_rrp = { - .is_rss = false, - 
{ - .rqn = mlx5e_get_direct_rqn(priv, ix, rrp) - }, - }; - - /* Direct RQ Tables */ - if (!priv->direct_tir[ix].rqt.enabled) - continue; - - rqtn = priv->direct_tir[ix].rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp); - } - if (ptp_rrp) { - rqtn = priv->ptp_tir.rqt.rqtn; - mlx5e_redirect_rqt(priv, rqtn, 1, *ptp_rrp); - } -} - -static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, - struct mlx5e_channels *chs) -{ - bool rx_ptp_support = priv->profile->rx_ptp_support; - struct mlx5e_redirect_rqt_param *ptp_rrp_p = NULL; - struct mlx5e_redirect_rqt_param rrp = { - .is_rss = true, - { - .rss = { - .channels = chs, - .hfunc = priv->rss_params.hfunc, - } - }, - }; - struct mlx5e_redirect_rqt_param ptp_rrp; - - if (rx_ptp_support) { - u32 ptp_rqn; - - ptp_rrp.is_rss = false; - ptp_rrp.rqn = mlx5e_ptp_get_rqn(priv->channels.ptp, &ptp_rqn) ? - priv->drop_rq.rqn : ptp_rqn; - ptp_rrp_p = &ptp_rrp; - } - mlx5e_redirect_rqts(priv, rrp, ptp_rrp_p); -} - -static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) -{ - bool rx_ptp_support = priv->profile->rx_ptp_support; - struct mlx5e_redirect_rqt_param drop_rrp = { - .is_rss = false, - { - .rqn = priv->drop_rq.rqn, - }, - }; - - mlx5e_redirect_rqts(priv, drop_rrp, rx_ptp_support ? &drop_rrp : NULL); -} - -static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { - [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, - .rx_hash_fields = MLX5_HASH_IP_L4PORTS, - }, - [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, - }, - [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP, - }, - [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, - .l4_prot_type = 0, - .rx_hash_fields = MLX5_HASH_IP, - }, -}; - -struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt) -{ - return tirc_default_config[tt]; -} - -static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) -{ - if (!params->lro_en) - return; - -#define ROUGH_MAX_L2_L3_HDR_SZ 256 - - MLX5_SET(tirc, tirc, lro_enable_mask, - MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO | - MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO); - MLX5_SET(tirc, tirc, lro_max_ip_payload_size, - (MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ - ROUGH_MAX_L2_L3_HDR_SZ) >> 8); - MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); -} - -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, - const struct mlx5e_tirc_config *ttconfig, - void *tirc, bool inner) -{ - void 
*hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : - MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); - - MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc)); - if (rss_params->hfunc == ETH_RSS_HASH_TOP) { - void *rss_key = MLX5_ADDR_OF(tirc, tirc, - rx_hash_toeplitz_key); - size_t len = MLX5_FLD_SZ_BYTES(tirc, - rx_hash_toeplitz_key); - - MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, rss_params->toeplitz_hash_key, len); - } - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - ttconfig->l3_prot_type); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - ttconfig->l4_prot_type); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - ttconfig->rx_hash_fields); -} - -static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, - enum mlx5e_traffic_types tt, - u32 rx_hash_fields) -{ - *ttconfig = tirc_default_config[tt]; - ttconfig->rx_hash_fields = rx_hash_fields; -} - -void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in) -{ - void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); - struct mlx5e_rss_params *rss = &priv->rss_params; - struct mlx5_core_dev *mdev = priv->mdev; - int ctxlen = MLX5_ST_SZ_BYTES(tirc); - struct mlx5e_tirc_config ttconfig; - int tt; - - MLX5_SET(modify_tir_in, in, bitmask.hash, 1); - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(tirc, 0, ctxlen); - mlx5e_update_rx_hash_fields(&ttconfig, tt, - rss->rx_hash_fields[tt]); - mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); - mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); - } - - /* Verify inner tirs resources allocated */ - if (!priv->inner_indir_tir[0].tirn) - return; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(tirc, 0, ctxlen); - mlx5e_update_rx_hash_fields(&ttconfig, tt, - rss->rx_hash_fields[tt]); - mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); - mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in); - } -} - static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv) { - struct mlx5_core_dev *mdev = priv->mdev; - - void *in; - void *tirc; - int inlen; - int err; - int tt; - int ix; - - inlen = MLX5_ST_SZ_BYTES(modify_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - MLX5_SET(modify_tir_in, in, bitmask.lro, 1); - tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + struct mlx5e_rx_res *res = priv->rx_res; + struct mlx5e_lro_param lro_param; - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); + lro_param = mlx5e_get_lro_param(&priv->channels.params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in); - if (err) - goto free_in; - } - - for (ix = 0; ix < priv->max_nch; ix++) { - err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn, in); - if (err) - goto free_in; - } - -free_in: - kvfree(in); - - return err; + return mlx5e_rx_res_lro_set_param(res, &lro_param); } static MLX5E_DEFINE_PREACTIVATE_WRAPPER_CTX(mlx5e_modify_tirs_lro); @@ -2649,22 +2263,34 @@ void mlx5e_set_netdev_mtu_boundaries(struct mlx5e_priv *priv) ETH_MAX_MTU); } -static void mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc) +static int mlx5e_netdev_set_tcs(struct net_device *netdev, u16 nch, u8 ntc, + struct tc_mqprio_qopt_offload *mqprio) { - int tc; + int tc, err; netdev_reset_tc(netdev); if (ntc == 1) - return; + return 0; - netdev_set_num_tc(netdev, ntc); + err = netdev_set_num_tc(netdev, ntc); + if (err) { + netdev_WARN(netdev, "netdev_set_num_tc failed (%d), ntc = %d\n", 
err, ntc); + return err; + } - /* Map netdev TCs to offset 0 - * We have our own UP to TXQ mapping for QoS - */ - for (tc = 0; tc < ntc; tc++) - netdev_set_tc_queue(netdev, tc, nch, 0); + for (tc = 0; tc < ntc; tc++) { + u16 count, offset; + + /* For DCB mode, map netdev TCs to offset 0 + * We have our own UP to TXQ mapping for QoS + */ + count = mqprio ? mqprio->qopt.count[tc] : nch; + offset = mqprio ? mqprio->qopt.offset[tc] : 0; + netdev_set_tc_queue(netdev, tc, count, offset); + } + + return 0; } int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) @@ -2674,7 +2300,7 @@ int mlx5e_update_tx_netdev_queues(struct mlx5e_priv *priv) qos_queues = mlx5e_qos_cur_leaf_nodes(priv); nch = priv->channels.params.num_channels; - ntc = priv->channels.params.num_tc; + ntc = mlx5e_get_dcb_num_tc(&priv->channels.params); num_txqs = nch * ntc + qos_queues; if (MLX5E_GET_PFLAG(&priv->channels.params, MLX5E_PFLAG_TX_PORT_TS)) num_txqs += ntc; @@ -2698,11 +2324,12 @@ static int mlx5e_update_netdev_queues(struct mlx5e_priv *priv) old_ntc = netdev->num_tc ? : 1; nch = priv->channels.params.num_channels; - ntc = priv->channels.params.num_tc; + ntc = mlx5e_get_dcb_num_tc(&priv->channels.params); num_rxqs = nch * priv->profile->rq_groups; - mlx5e_netdev_set_tcs(netdev, nch, ntc); - + err = mlx5e_netdev_set_tcs(netdev, nch, ntc, NULL); + if (err) + goto err_out; err = mlx5e_update_tx_netdev_queues(priv); if (err) goto err_tcs; @@ -2723,7 +2350,8 @@ err_txqs: WARN_ON_ONCE(netif_set_real_num_tx_queues(netdev, old_num_txqs)); err_tcs: - mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc); + mlx5e_netdev_set_tcs(netdev, old_num_txqs / old_ntc, old_ntc, NULL); +err_out: return err; } @@ -2759,9 +2387,9 @@ int mlx5e_num_channels_changed(struct mlx5e_priv *priv) mlx5e_set_default_xps_cpumasks(priv, &priv->channels.params); - if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, - MLX5E_INDIR_RQT_SIZE, count); + /* This function may be called on attach, before priv->rx_res is created. */ + if (!netif_is_rxfh_configured(priv->netdev) && priv->rx_res) + mlx5e_rx_res_rss_set_indir_uniform(priv->rx_res, count); return 0; } @@ -2773,7 +2401,7 @@ static void mlx5e_build_txq_maps(struct mlx5e_priv *priv) int i, ch, tc, num_tc; ch = priv->channels.num; - num_tc = priv->channels.params.num_tc; + num_tc = mlx5e_get_dcb_num_tc(&priv->channels.params); for (i = 0; i < ch; i++) { for (tc = 0; tc < num_tc; tc++) { @@ -2804,7 +2432,7 @@ static void mlx5e_update_num_tc_x_num_ch(struct mlx5e_priv *priv) { /* Sync with mlx5e_select_queue. 
*/ WRITE_ONCE(priv->num_tc_x_num_ch, - priv->channels.params.num_tc * priv->channels.num); + mlx5e_get_dcb_num_tc(&priv->channels.params) * priv->channels.num); } void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) @@ -2820,16 +2448,15 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); - mlx5e_redirect_rqts_to_channels(priv, &priv->channels); - mlx5e_xsk_redirect_rqts_to_channels(priv, &priv->channels); + if (priv->rx_res) + mlx5e_rx_res_channels_activate(priv->rx_res, &priv->channels); } void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { - mlx5e_xsk_redirect_rqts_to_drop(priv, &priv->channels); - - mlx5e_redirect_rqts_to_drop(priv); + if (priv->rx_res) + mlx5e_rx_res_channels_deactivate(priv->rx_res); if (mlx5e_is_vport_rep(priv)) mlx5e_remove_sqs_fwd_rules(priv); @@ -3204,224 +2831,152 @@ static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) mlx5e_destroy_tises(priv); } -static void mlx5e_build_indir_tir_ctx_common(struct mlx5e_priv *priv, - u32 rqtn, u32 *tirc) -{ - MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.hw_objs.td.tdn); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, rqtn); - MLX5_SET(tirc, tirc, tunneled_offload_en, - priv->channels.params.tunneled_offload_en); - - mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc); -} - -static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, - enum mlx5e_traffic_types tt, - u32 *tirc) +static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) { - mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, - &tirc_default_config[tt], tirc, false); -} + int err = 0; + int i; -static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) -{ - mlx5e_build_indir_tir_ctx_common(priv, rqtn, tirc); - MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8); -} + for (i = 0; i < chs->num; i++) { + err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); + if (err) + return err; + } -static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, - enum mlx5e_traffic_types tt, - u32 *tirc) -{ - mlx5e_build_indir_tir_ctx_common(priv, priv->indir_rqt.rqtn, tirc); - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, - &tirc_default_config[tt], tirc, true); + return 0; } -int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv, bool inner_ttc) +static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) { - struct mlx5e_tir *tir; - void *tirc; - int inlen; - int i = 0; int err; - u32 *in; - int tt; - - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(in, 0, inlen); - tir = &priv->indir_tir[tt]; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_indir_tir_ctx(priv, tt, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in); - if (err) { - mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err); - goto err_destroy_inner_tirs; - } - } - - if (!inner_ttc || !mlx5e_tunnel_inner_ft_supported(priv->mdev)) - goto out; + int i; - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) { - memset(in, 0, inlen); - tir = &priv->inner_indir_tir[i]; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_inner_indir_tir_ctx(priv, i, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in); - if (err) { - 
mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err); - goto err_destroy_inner_tirs; - } + for (i = 0; i < chs->num; i++) { + err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); + if (err) + return err; } - -out: - kvfree(in); + if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state)) + return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd); return 0; - -err_destroy_inner_tirs: - for (i--; i >= 0; i--) - mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); - - for (tt--; tt >= 0; tt--) - mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]); - - kvfree(in); - - return err; } -int mlx5e_create_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) +static int mlx5e_setup_tc_mqprio_dcb(struct mlx5e_priv *priv, + struct tc_mqprio_qopt *mqprio) { - struct mlx5e_tir *tir; - void *tirc; - int inlen; - int err = 0; - u32 *in; - int ix; - - inlen = MLX5_ST_SZ_BYTES(create_tir_in); - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) - return -ENOMEM; + struct mlx5e_params new_params; + u8 tc = mqprio->num_tc; + int err; - for (ix = 0; ix < n; ix++) { - memset(in, 0, inlen); - tir = &tirs[ix]; - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - mlx5e_build_direct_tir_ctx(priv, tir->rqt.rqtn, tirc); - err = mlx5e_create_tir(priv->mdev, tir, in); - if (unlikely(err)) - goto err_destroy_ch_tirs; - } + mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; - goto out; + if (tc && tc != MLX5E_MAX_NUM_TC) + return -EINVAL; -err_destroy_ch_tirs: - mlx5_core_warn(priv->mdev, "create tirs failed, %d\n", err); - for (ix--; ix >= 0; ix--) - mlx5e_destroy_tir(priv->mdev, &tirs[ix]); + new_params = priv->channels.params; + new_params.mqprio.mode = TC_MQPRIO_MODE_DCB; + new_params.mqprio.num_tc = tc ? tc : 1; -out: - kvfree(in); + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_num_channels_changed_ctx, NULL, true); + priv->max_opened_tc = max_t(u8, priv->max_opened_tc, + mlx5e_get_dcb_num_tc(&priv->channels.params)); return err; } -void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv) +static int mlx5e_mqprio_channel_validate(struct mlx5e_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) { + struct net_device *netdev = priv->netdev; + int agg_count = 0; int i; - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]); - - /* Verify inner tirs resources allocated */ - if (!priv->inner_indir_tir[0].tirn) - return; - - for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) - mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]); -} - -void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv, struct mlx5e_tir *tirs, int n) -{ - int i; + if (mqprio->qopt.offset[0] != 0 || mqprio->qopt.num_tc < 1 || + mqprio->qopt.num_tc > MLX5E_MAX_NUM_MQPRIO_CH_TC) + return -EINVAL; - for (i = 0; i < n; i++) - mlx5e_destroy_tir(priv->mdev, &tirs[i]); -} + for (i = 0; i < mqprio->qopt.num_tc; i++) { + if (!mqprio->qopt.count[i]) { + netdev_err(netdev, "Zero size for queue-group (%d) is not supported\n", i); + return -EINVAL; + } + if (mqprio->min_rate[i]) { + netdev_err(netdev, "Min tx rate is not supported\n"); + return -EINVAL; + } + if (mqprio->max_rate[i]) { + netdev_err(netdev, "Max tx rate is not supported\n"); + return -EINVAL; + } -static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable) -{ - int err = 0; - int i; + if (mqprio->qopt.offset[i] != agg_count) { + netdev_err(netdev, "Discontinuous queues config is not supported\n"); + return -EINVAL; + } + agg_count += mqprio->qopt.count[i]; + } - for (i = 0; i < chs->num; i++) { - err = 
mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable); - if (err) - return err; + if (priv->channels.params.num_channels < agg_count) { + netdev_err(netdev, "Num of queues (%d) exceeds available (%d)\n", + agg_count, priv->channels.params.num_channels); + return -EINVAL; } return 0; } -static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd) +static int mlx5e_mqprio_channel_set_tcs_ctx(struct mlx5e_priv *priv, void *ctx) { - int err; - int i; + struct tc_mqprio_qopt_offload *mqprio = (struct tc_mqprio_qopt_offload *)ctx; + struct net_device *netdev = priv->netdev; + u8 num_tc; - for (i = 0; i < chs->num; i++) { - err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd); - if (err) - return err; - } - if (chs->ptp && test_bit(MLX5E_PTP_STATE_RX, chs->ptp->state)) - return mlx5e_modify_rq_vsd(&chs->ptp->rq, vsd); + if (priv->channels.params.mqprio.mode != TC_MQPRIO_MODE_CHANNEL) + return -EINVAL; + + num_tc = priv->channels.params.mqprio.num_tc; + mlx5e_netdev_set_tcs(netdev, 0, num_tc, mqprio); return 0; } -static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, - struct tc_mqprio_qopt *mqprio) +static int mlx5e_setup_tc_mqprio_channel(struct mlx5e_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) { struct mlx5e_params new_params; - u8 tc = mqprio->num_tc; - int err = 0; + int err; - mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; + err = mlx5e_mqprio_channel_validate(priv, mqprio); + if (err) + return err; - if (tc && tc != MLX5E_MAX_NUM_TC) - return -EINVAL; + new_params = priv->channels.params; + new_params.mqprio.mode = TC_MQPRIO_MODE_CHANNEL; + new_params.mqprio.num_tc = mqprio->qopt.num_tc; + err = mlx5e_safe_switch_params(priv, &new_params, + mlx5e_mqprio_channel_set_tcs_ctx, mqprio, true); - mutex_lock(&priv->state_lock); + return err; +} +static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv, + struct tc_mqprio_qopt_offload *mqprio) +{ /* MQPRIO is another toplevel qdisc that can't be attached * simultaneously with the offloaded HTB. */ - if (WARN_ON(priv->htb.maj_id)) { - err = -EINVAL; - goto out; - } - - new_params = priv->channels.params; - new_params.num_tc = tc ? 
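As a worked example of what mlx5e_mqprio_channel_validate() accepts: with 5 channels open, two queue groups with count = {2, 3} and offset = {0, 2} are contiguous and fit, so the request passes; count = {2, 3} with offset = {0, 3} leaves a hole after the first group and is rejected as discontinuous, as is any group with a zero count or a per-TC min/max rate. From user space this corresponds roughly to "tc qdisc add dev eth0 root mqprio num_tc 2 map 0 1 queues 2@0 3@2 hw 1 mode channel" (device name and priority map chosen only for illustration).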
tc : 1; - - err = mlx5e_safe_switch_params(priv, &new_params, - mlx5e_num_channels_changed_ctx, NULL, true); + if (WARN_ON(priv->htb.maj_id)) + return -EINVAL; -out: - priv->max_opened_tc = max_t(u8, priv->max_opened_tc, - priv->channels.params.num_tc); - mutex_unlock(&priv->state_lock); - return err; + switch (mqprio->mode) { + case TC_MQPRIO_MODE_DCB: + return mlx5e_setup_tc_mqprio_dcb(priv, &mqprio->qopt); + case TC_MQPRIO_MODE_CHANNEL: + return mlx5e_setup_tc_mqprio_channel(priv, mqprio); + default: + return -EOPNOTSUPP; + } } static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offload *htb) @@ -3445,8 +3000,7 @@ static int mlx5e_setup_tc_htb(struct mlx5e_priv *priv, struct tc_htb_qopt_offloa return mlx5e_htb_leaf_to_inner(priv, htb->parent_classid, htb->classid, htb->rate, htb->ceil, htb->extack); case TC_HTB_LEAF_DEL: - return mlx5e_htb_leaf_del(priv, htb->classid, &htb->moved_qid, &htb->qid, - htb->extack); + return mlx5e_htb_leaf_del(priv, &htb->classid, htb->extack); case TC_HTB_LEAF_DEL_LAST: case TC_HTB_LEAF_DEL_LAST_FORCE: return mlx5e_htb_leaf_del_last(priv, htb->classid, @@ -3493,7 +3047,10 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, priv, priv, true); } case TC_SETUP_QDISC_MQPRIO: - return mlx5e_setup_tc_mqprio(priv, type_data); + mutex_lock(&priv->state_lock); + err = mlx5e_setup_tc_mqprio(priv, type_data); + mutex_unlock(&priv->state_lock); + return err; case TC_SETUP_QDISC_HTB: mutex_lock(&priv->state_lock); err = mlx5e_setup_tc_htb(priv, type_data); @@ -4582,7 +4139,7 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_set_features = mlx5e_set_features, .ndo_fix_features = mlx5e_fix_features, .ndo_change_mtu = mlx5e_change_nic_mtu, - .ndo_do_ioctl = mlx5e_ioctl, + .ndo_eth_ioctl = mlx5e_ioctl, .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate, .ndo_features_check = mlx5e_features_check, .ndo_tx_timeout = mlx5e_tx_timeout, @@ -4611,15 +4168,6 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_devlink_port = mlx5e_get_devlink_port, }; -void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len, - int num_channels) -{ - int i; - - for (i = 0; i < len; i++) - indirection_rqt[i] = i % num_channels; -} - static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout) { int i; @@ -4632,24 +4180,8 @@ static u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeo return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]); } -void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, - u16 num_channels) -{ - enum mlx5e_traffic_types tt; - - rss_params->hfunc = ETH_RSS_HASH_TOP; - netdev_rss_key_fill(rss_params->toeplitz_hash_key, - sizeof(rss_params->toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, - MLX5E_INDIR_RQT_SIZE, num_channels); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - rss_params->rx_hash_fields[tt] = - tirc_default_config[tt].rx_hash_fields; -} - void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 mtu) { - struct mlx5e_rss_params *rss_params = &priv->rss_params; struct mlx5e_params *params = &priv->channels.params; struct mlx5_core_dev *mdev = priv->mdev; u8 rx_cq_period_mode; @@ -4660,12 +4192,12 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 params->hard_mtu = MLX5E_ETH_HARD_MTU; params->num_channels = min_t(unsigned int, MLX5E_MAX_NUM_CHANNELS / 2, priv->max_nch); - params->num_tc = 1; + params->mqprio.num_tc = 1; /* Set an initial non-zero value, so 
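Most former params->num_tc readers in this patch now go through mlx5e_get_dcb_num_tc(), since only DCB-mode mqprio multiplies the per-channel SQ count by the TC count; channel-mode mqprio partitions the existing channels instead. The helper is defined outside this hunk; a minimal sketch of the assumed behaviour:

	static u8 mlx5e_get_dcb_num_tc(struct mlx5e_params *params)
	{
		/* Channel-mode mqprio keeps one set of SQs per channel. */
		return params->mqprio.mode == TC_MQPRIO_MODE_DCB ?
			params->mqprio.num_tc : 1;
	}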
that mlx5e_select_queue won't * divide by zero if called before first activating channels. */ - priv->num_tc_x_num_ch = params->num_channels * params->num_tc; + priv->num_tc_x_num_ch = params->num_channels * params->mqprio.num_tc; /* SQ */ params->log_sq_size = is_kdump_kernel() ? @@ -4709,10 +4241,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 /* TX inline */ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); - /* RSS */ - mlx5e_build_rss_params(rss_params, params->num_channels); - params->tunneled_offload_en = - mlx5e_tunnel_inner_ft_supported(mdev); + params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev); /* AF_XDP */ params->xsk = xsk; @@ -4772,8 +4301,8 @@ static bool mlx5e_tunnel_any_tx_proto_supported(struct mlx5_core_dev *mdev) { int tt; - for (tt = 0; tt < MLX5E_NUM_TUNNEL_TT; tt++) { - if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5e_get_proto_by_tunnel_type(tt))) + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + if (mlx5e_tunnel_proto_supported_tx(mdev, mlx5_get_proto_by_tunnel_type(tt))) return true; } return (mlx5_vxlan_allowed(mdev->vxlan) || mlx5_geneve_tx_allowed(mdev)); @@ -4812,7 +4341,14 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_TX; netdev->hw_enc_features |= NETIF_F_HW_VLAN_CTAG_RX; + /* Tunneled LRO is not supported in the driver, and the same RQs are + * shared between inner and outer TIRs, so the driver can't disable LRO + * for inner TIRs while having it enabled for outer TIRs. Due to this, + * block LRO altogether if the firmware declares tunneled LRO support. + */ if (!!MLX5_CAP_ETH(mdev, lro_cap) && + !MLX5_CAP_ETH(mdev, tunnel_lro_vxlan) && + !MLX5_CAP_ETH(mdev, tunnel_lro_gre) && mlx5e_check_fragmented_striding_rq_cap(mdev)) netdev->vlan_features |= NETIF_F_LRO; @@ -4939,7 +4475,6 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); - struct devlink_port *dl_port; int err; mlx5e_build_nic_params(priv, &priv->xsk, netdev->mtu); @@ -4955,19 +4490,13 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, if (err) mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); - dl_port = mlx5e_devlink_get_dl_port(priv); - if (dl_port->registered) - mlx5e_health_create_reporters(priv); - + mlx5e_health_create_reporters(priv); return 0; } static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) { - struct devlink_port *dl_port = mlx5e_devlink_get_dl_port(priv); - - if (dl_port->registered) - mlx5e_health_destroy_reporters(priv); + mlx5e_health_destroy_reporters(priv); mlx5e_tls_cleanup(priv); mlx5e_ipsec_cleanup(priv); } @@ -4975,9 +4504,14 @@ static void mlx5e_nic_cleanup(struct mlx5e_priv *priv) static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - u16 max_nch = priv->max_nch; + enum mlx5e_rx_res_features features; + struct mlx5e_lro_param lro_param; int err; + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) + return -ENOMEM; + mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -4986,42 +4520,20 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - err = mlx5e_create_indirect_rqt(priv); + features = MLX5E_RX_RES_FEATURE_XSK | MLX5E_RX_RES_FEATURE_PTP; + if (priv->channels.params.tunneled_offload_en) + features |= MLX5E_RX_RES_FEATURE_INNER_FT; + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rx_res_init(priv->rx_res, 
priv->mdev, features, + priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->channels.params.num_channels); if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_rqts; - - err = mlx5e_create_indirect_tirs(priv, true); - if (err) - goto err_destroy_direct_rqts; - - err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_tirs; - - err = mlx5e_create_direct_rqts(priv, priv->xsk_tir, max_nch); - if (unlikely(err)) - goto err_destroy_direct_tirs; - - err = mlx5e_create_direct_tirs(priv, priv->xsk_tir, max_nch); - if (unlikely(err)) - goto err_destroy_xsk_rqts; - - err = mlx5e_create_direct_rqts(priv, &priv->ptp_tir, 1); - if (err) - goto err_destroy_xsk_tirs; - - err = mlx5e_create_direct_tirs(priv, &priv->ptp_tir, 1); - if (err) - goto err_destroy_ptp_rqt; - err = mlx5e_create_flow_steering(priv); if (err) { mlx5_core_warn(mdev, "create flow steering failed, %d\n", err); - goto err_destroy_ptp_direct_tir; + goto err_destroy_rx_res; } err = mlx5e_tc_nic_init(priv); @@ -5042,46 +4554,27 @@ err_tc_nic_cleanup: mlx5e_tc_nic_cleanup(priv); err_destroy_flow_steering: mlx5e_destroy_flow_steering(priv); -err_destroy_ptp_direct_tir: - mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1); -err_destroy_ptp_rqt: - mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1); -err_destroy_xsk_tirs: - mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch); -err_destroy_xsk_rqts: - mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch); -err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); -err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); -err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); -err_destroy_indirect_rqts: - mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_destroy_rx_res: + mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; return err; } static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv) { - u16 max_nch = priv->max_nch; - mlx5e_accel_cleanup_rx(priv); mlx5e_tc_nic_cleanup(priv); mlx5e_destroy_flow_steering(priv); - mlx5e_destroy_direct_tirs(priv, &priv->ptp_tir, 1); - mlx5e_destroy_direct_rqts(priv, &priv->ptp_tir, 1); - mlx5e_destroy_direct_tirs(priv, priv->xsk_tir, max_nch); - mlx5e_destroy_direct_rqts(priv, priv->xsk_tir, max_nch); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); - mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; } static int mlx5e_init_nic_tx(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index bf94bcb6fa5d..ae71a17fdb27 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -49,6 +49,7 @@ #include "en/devlink.h" #include "fs_core.h" #include "lib/mlx5.h" +#include "lib/devcom.h" #define CREATE_TRACE_POINTS #include "diag/en_rep_tracepoint.h" #include "en_accel/ipsec.h" @@ -250,7 +251,9 @@ static int mlx5e_rep_set_channels(struct net_device *dev, } static int mlx5e_rep_get_coalesce(struct 
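Taken together, the init/cleanup hunks above replace the hand-rolled RQT/TIR bookkeeping with one opaque RX-resources object. Condensed from the calls visible in this diff (a sketch of the assumed lifecycle, not compilable on its own):

	res = mlx5e_rx_res_alloc();
	err = mlx5e_rx_res_init(res, mdev, features, max_nch,
				drop_rqn, &lro_param, num_channels);
	/* on channel activate / deactivate */
	mlx5e_rx_res_channels_activate(res, &priv->channels);
	mlx5e_rx_res_channels_deactivate(res);
	/* on profile cleanup */
	mlx5e_rx_res_destroy(res);
	mlx5e_rx_res_free(res);

which is why the error unwinding in mlx5e_init_nic_rx() collapses from a chain of per-resource err_destroy_* labels to a single err_destroy_rx_res.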
net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -258,7 +261,9 @@ static int mlx5e_rep_get_coalesce(struct net_device *netdev, } static int mlx5e_rep_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -310,6 +315,8 @@ static void mlx5e_sqs2vport_stop(struct mlx5_eswitch *esw, rpriv = mlx5e_rep_to_rep_priv(rep); list_for_each_entry_safe(rep_sq, tmp, &rpriv->vport_sqs_list, list) { mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); + if (rep_sq->send_to_vport_rule_peer) + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer); list_del(&rep_sq->list); kfree(rep_sq); } @@ -319,6 +326,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, struct mlx5_eswitch_rep *rep, u32 *sqns_array, int sqns_num) { + struct mlx5_eswitch *peer_esw = NULL; struct mlx5_flow_handle *flow_rule; struct mlx5e_rep_priv *rpriv; struct mlx5e_rep_sq *rep_sq; @@ -329,6 +337,10 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, return 0; rpriv = mlx5e_rep_to_rep_priv(rep); + if (mlx5_devcom_is_paired(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + peer_esw = mlx5_devcom_get_peer_data(esw->dev->priv.devcom, + MLX5_DEVCOM_ESW_OFFLOADS); + for (i = 0; i < sqns_num; i++) { rep_sq = kzalloc(sizeof(*rep_sq), GFP_KERNEL); if (!rep_sq) { @@ -337,7 +349,7 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, } /* Add re-inject rule to the PF/representor sqs */ - flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, rep, + flow_rule = mlx5_eswitch_add_send_to_vport_rule(esw, esw, rep, sqns_array[i]); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); @@ -345,12 +357,34 @@ static int mlx5e_sqs2vport_start(struct mlx5_eswitch *esw, goto out_err; } rep_sq->send_to_vport_rule = flow_rule; + rep_sq->sqn = sqns_array[i]; + + if (peer_esw) { + flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, + rep, sqns_array[i]); + if (IS_ERR(flow_rule)) { + err = PTR_ERR(flow_rule); + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule); + kfree(rep_sq); + goto out_err; + } + rep_sq->send_to_vport_rule_peer = flow_rule; + } + list_add(&rep_sq->list, &rpriv->vport_sqs_list); } + + if (peer_esw) + mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return 0; out_err: mlx5e_sqs2vport_stop(esw, rep); + + if (peer_esw) + mlx5_devcom_release_peer_data(esw->dev->priv.devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return err; } @@ -364,7 +398,8 @@ int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) int err = -ENOMEM; u32 *sqs; - sqs = kcalloc(priv->channels.num * priv->channels.params.num_tc, sizeof(*sqs), GFP_KERNEL); + sqs = kcalloc(priv->channels.num * mlx5e_get_dcb_num_tc(&priv->channels.params), + sizeof(*sqs), GFP_KERNEL); if (!sqs) goto out; @@ -581,13 +616,10 @@ static void mlx5e_build_rep_params(struct net_device *netdev) params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); - params->num_tc = 1; + params->mqprio.num_tc = 1; params->tunneled_offload_en = false; mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); - - /* RSS */ - mlx5e_build_rss_params(&priv->rss_params, params->num_channels); } static void mlx5e_build_rep_netdev(struct 
net_device *netdev, @@ -651,25 +683,23 @@ static int mlx5e_create_rep_ttc_table(struct mlx5e_priv *priv) struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep = rpriv->rep; struct ttc_params ttc_params = {}; - int tt, err; + int err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); /* The inner_ttc in the ttc params is intentionally not set */ - ttc_params.any_tt_tirn = priv->direct_tir[0].tirn; - mlx5e_set_ttc_ft_params(&ttc_params); + mlx5e_set_ttc_params(priv, &ttc_params, false); if (rep->vport != MLX5_VPORT_UPLINK) /* To give uplik rep TTC a lower level for chaining from root ft */ ttc_params.ft_attr.level = MLX5E_TTC_FT_LEVEL + 1; - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; - - err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); - if (err) { - netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", err); + priv->fs.ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(priv->fs.ttc)) { + err = PTR_ERR(priv->fs.ttc); + netdev_err(priv->netdev, "Failed to create rep ttc table, err=%d\n", + err); return err; } return 0; @@ -687,7 +717,7 @@ static int mlx5e_create_rep_root_ft(struct mlx5e_priv *priv) /* non uplik reps will skip any bypass tables and go directly to * their own ttc */ - rpriv->root_ft = priv->fs.ttc.ft.t; + rpriv->root_ft = mlx5_get_ttc_flow_table(priv->fs.ttc); return 0; } @@ -760,9 +790,13 @@ int mlx5e_rep_bond_update(struct mlx5e_priv *priv, bool cleanup) static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - u16 max_nch = priv->max_nch; + struct mlx5e_lro_param lro_param; int err; + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) + return -ENOMEM; + mlx5e_init_l2_addr(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -771,25 +805,16 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) return err; } - err = mlx5e_create_indirect_rqt(priv); + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, + priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->channels.params.num_channels); if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_rqts; - - err = mlx5e_create_indirect_tirs(priv, false); - if (err) - goto err_destroy_direct_rqts; - - err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_tirs; - err = mlx5e_create_rep_ttc_table(priv); if (err) - goto err_destroy_direct_tirs; + goto err_destroy_rx_res; err = mlx5e_create_rep_root_ft(priv); if (err) @@ -806,33 +831,26 @@ static int mlx5e_init_rep_rx(struct mlx5e_priv *priv) err_destroy_root_ft: mlx5e_destroy_rep_root_ft(priv); err_destroy_ttc_table: - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); -err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); -err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); -err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); -err_destroy_indirect_rqts: - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5_destroy_ttc_table(priv->fs.ttc); +err_destroy_rx_res: + mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; return err; } static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) { - u16 max_nch = priv->max_nch; - 
mlx5e_ethtool_cleanup_steering(priv); rep_vport_rx_rule_destroy(priv); mlx5e_destroy_rep_root_ft(priv); - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); - mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5_destroy_ttc_table(priv->fs.ttc); + mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; } static int mlx5e_init_ul_rep_rx(struct mlx5e_priv *priv) @@ -1264,10 +1282,64 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) return rpriv->netdev; } +static void mlx5e_vport_rep_event_unpair(struct mlx5_eswitch_rep *rep) +{ + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_sq *rep_sq; + + rpriv = mlx5e_rep_to_rep_priv(rep); + list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) { + if (!rep_sq->send_to_vport_rule_peer) + continue; + mlx5_eswitch_del_send_to_vport_rule(rep_sq->send_to_vport_rule_peer); + rep_sq->send_to_vport_rule_peer = NULL; + } +} + +static int mlx5e_vport_rep_event_pair(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + struct mlx5_eswitch *peer_esw) +{ + struct mlx5_flow_handle *flow_rule; + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_sq *rep_sq; + + rpriv = mlx5e_rep_to_rep_priv(rep); + list_for_each_entry(rep_sq, &rpriv->vport_sqs_list, list) { + if (rep_sq->send_to_vport_rule_peer) + continue; + flow_rule = mlx5_eswitch_add_send_to_vport_rule(peer_esw, esw, rep, rep_sq->sqn); + if (IS_ERR(flow_rule)) + goto err_out; + rep_sq->send_to_vport_rule_peer = flow_rule; + } + + return 0; +err_out: + mlx5e_vport_rep_event_unpair(rep); + return PTR_ERR(flow_rule); +} + +static int mlx5e_vport_rep_event(struct mlx5_eswitch *esw, + struct mlx5_eswitch_rep *rep, + enum mlx5_switchdev_event event, + void *data) +{ + int err = 0; + + if (event == MLX5_SWITCHDEV_EVENT_PAIR) + err = mlx5e_vport_rep_event_pair(esw, rep, data); + else if (event == MLX5_SWITCHDEV_EVENT_UNPAIR) + mlx5e_vport_rep_event_unpair(rep); + + return err; +} + static const struct mlx5_eswitch_rep_ops rep_ops = { .load = mlx5e_vport_rep_load, .unload = mlx5e_vport_rep_unload, - .get_proto_dev = mlx5e_vport_rep_get_proto_dev + .get_proto_dev = mlx5e_vport_rep_get_proto_dev, + .event = mlx5e_vport_rep_event, }; static int mlx5e_rep_probe(struct auxiliary_device *adev, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 47a2dfb7792a..48a203a9e7d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -60,6 +60,7 @@ struct mlx5e_neigh_update_table { struct mlx5_tc_ct_priv; struct mlx5e_rep_bond; struct mlx5e_tc_tun_encap; +struct mlx5e_post_act; struct mlx5_rep_uplink_priv { /* Filters DB - instantiated by the uplink representor and shared by @@ -88,8 +89,9 @@ struct mlx5_rep_uplink_priv { /* maps tun_enc_opts to a unique id*/ struct mapping_ctx *tunnel_enc_opts_mapping; + struct mlx5e_post_act *post_act; struct mlx5_tc_ct_priv *ct_priv; - struct mlx5_esw_psample *esw_psample; + struct mlx5e_tc_psample *tc_psample; /* support eswitch vports bonding */ struct mlx5e_rep_bond *bond; @@ -146,7 +148,7 @@ struct mlx5e_neigh_hash_entry { */ refcount_t refcnt; - /* Save the last reported time offloaded trafic pass over one of the + /* Save the last reported time offloaded traffic pass over one of the * neigh hash entry 
flows. Use it to periodically update the neigh * 'used' value and avoid neigh deleting by the kernel. */ @@ -207,6 +209,8 @@ struct mlx5e_encap_entry { struct mlx5e_rep_sq { struct mlx5_flow_handle *send_to_vport_rule; + struct mlx5_flow_handle *send_to_vport_rule_peer; + u32 sqn; struct list_head list; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index d273758255c3..ba8164792016 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -34,25 +34,20 @@ #include <net/flow_offload.h> #include <net/sch_generic.h> #include <net/pkt_cls.h> -#include <net/tc_act/tc_gact.h> -#include <net/tc_act/tc_skbedit.h> #include <linux/mlx5/fs.h> #include <linux/mlx5/device.h> #include <linux/rhashtable.h> #include <linux/refcount.h> #include <linux/completion.h> -#include <net/tc_act/tc_mirred.h> -#include <net/tc_act/tc_vlan.h> -#include <net/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> -#include <net/tc_act/tc_mpls.h> #include <net/psample.h> #include <net/arp.h> #include <net/ipv6_stubs.h> #include <net/bareudp.h> #include <net/bonding.h> #include "en.h" +#include "en/tc/post_act.h" #include "en_rep.h" #include "en/rep/tc.h" #include "en/rep/neigh.h" @@ -66,7 +61,7 @@ #include "en/mod_hdr.h" #include "en/tc_priv.h" #include "en/tc_tun_encap.h" -#include "esw/sample.h" +#include "en/tc/sample.h" #include "lib/devcom.h" #include "lib/geneve.h" #include "lib/fs_chains.h" @@ -103,7 +98,7 @@ struct mlx5e_tc_attr_to_reg_mapping mlx5e_tc_attr_to_reg_mappings[] = { [MARK_TO_REG] = mark_to_reg_ct, [LABELS_TO_REG] = labels_to_reg_ct, [FTEID_TO_REG] = fteid_to_reg_ct, - /* For NIC rules we store the retore metadata directly + /* For NIC rules we store the restore metadata directly * into reg_b that is passed to SW since we don't * jump between steering domains. 
*/ @@ -252,7 +247,7 @@ get_ct_priv(struct mlx5e_priv *priv) } #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) -static struct mlx5_esw_psample * +static struct mlx5e_tc_psample * get_sample_priv(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -263,7 +258,7 @@ get_sample_priv(struct mlx5e_priv *priv) uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); uplink_priv = &uplink_rpriv->uplink_priv; - return uplink_priv->esw_psample; + return uplink_priv->tc_psample; } return NULL; @@ -340,12 +335,12 @@ struct mlx5e_hairpin { struct mlx5_core_dev *func_mdev; struct mlx5e_priv *func_priv; u32 tdn; - u32 tirn; + struct mlx5e_tir direct_tir; int num_channels; struct mlx5e_rqt indir_rqt; - u32 indir_tirn[MLX5E_NUM_INDIR_TIRS]; - struct mlx5e_ttc_table ttc; + struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; + struct mlx5_ttc_table *ttc; }; struct mlx5e_hairpin_entry { @@ -482,126 +477,101 @@ struct mlx5_core_dev *mlx5e_hairpin_get_mdev(struct net *net, int ifindex) static int mlx5e_hairpin_create_transport(struct mlx5e_hairpin *hp) { - u32 in[MLX5_ST_SZ_DW(create_tir_in)] = {}; - void *tirc; + struct mlx5e_tir_builder *builder; int err; + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + err = mlx5_core_alloc_transport_domain(hp->func_mdev, &hp->tdn); if (err) - goto alloc_tdn_err; - - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); - - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT); - MLX5_SET(tirc, tirc, inline_rqn, hp->pair->rqn[0]); - MLX5_SET(tirc, tirc, transport_domain, hp->tdn); + goto out; - err = mlx5_core_create_tir(hp->func_mdev, in, &hp->tirn); + mlx5e_tir_builder_build_inline(builder, hp->tdn, hp->pair->rqn[0]); + err = mlx5e_tir_init(&hp->direct_tir, builder, hp->func_mdev, false); if (err) goto create_tir_err; - return 0; +out: + mlx5e_tir_builder_free(builder); + return err; create_tir_err: mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); -alloc_tdn_err: - return err; + + goto out; } static void mlx5e_hairpin_destroy_transport(struct mlx5e_hairpin *hp) { - mlx5_core_destroy_tir(hp->func_mdev, hp->tirn); + mlx5e_tir_destroy(&hp->direct_tir); mlx5_core_dealloc_transport_domain(hp->func_mdev, hp->tdn); } -static int mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) -{ - struct mlx5e_priv *priv = hp->func_priv; - int i, ix, sz = MLX5E_INDIR_RQT_SIZE; - u32 *indirection_rqt, rqn; - - indirection_rqt = kcalloc(sz, sizeof(*indirection_rqt), GFP_KERNEL); - if (!indirection_rqt) - return -ENOMEM; - - mlx5e_build_default_indir_rqt(indirection_rqt, sz, - hp->num_channels); - - for (i = 0; i < sz; i++) { - ix = i; - if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR) - ix = mlx5e_bits_invert(i, ilog2(sz)); - ix = indirection_rqt[ix]; - rqn = hp->pair->rqn[ix]; - MLX5_SET(rqtc, rqtc, rq_num[i], rqn); - } - - kfree(indirection_rqt); - return 0; -} - static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp) { - int inlen, err, sz = MLX5E_INDIR_RQT_SIZE; struct mlx5e_priv *priv = hp->func_priv; struct mlx5_core_dev *mdev = priv->mdev; - void *rqtc; - u32 *in; + struct mlx5e_rss_params_indir *indir; + int err; - inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz; - in = kvzalloc(inlen, GFP_KERNEL); - if (!in) + indir = kvmalloc(sizeof(*indir), GFP_KERNEL); + if (!indir) return -ENOMEM; - rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context); - - MLX5_SET(rqtc, rqtc, rqt_actual_size, sz); - MLX5_SET(rqtc, rqtc, rqt_max_size, sz); - - err = mlx5e_hairpin_fill_rqt_rqns(hp, rqtc); - if (err) - 
goto out; - - err = mlx5_core_create_rqt(mdev, in, inlen, &hp->indir_rqt.rqtn); - if (!err) - hp->indir_rqt.enabled = true; + mlx5e_rss_params_indir_init_uniform(indir, hp->num_channels); + err = mlx5e_rqt_init_indir(&hp->indir_rqt, mdev, hp->pair->rqn, hp->num_channels, + mlx5e_rx_res_get_current_hash(priv->rx_res).hfunc, + indir); -out: - kvfree(in); + kvfree(indir); return err; } static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) { struct mlx5e_priv *priv = hp->func_priv; - u32 in[MLX5_ST_SZ_DW(create_tir_in)]; - int tt, i, err; - void *tirc; + struct mlx5e_rss_params_hash rss_hash; + enum mlx5_traffic_types tt, max_tt; + struct mlx5e_tir_builder *builder; + int err = 0; + + builder = mlx5e_tir_builder_alloc(false); + if (!builder) + return -ENOMEM; + + rss_hash = mlx5e_rx_res_get_current_hash(priv->rx_res); for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt); + struct mlx5e_rss_params_traffic_type rss_tt; - memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in)); - tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); + rss_tt = mlx5e_rss_get_default_tt_config(tt); - MLX5_SET(tirc, tirc, transport_domain, hp->tdn); - MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); - MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn); - mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false); + mlx5e_tir_builder_build_rqt(builder, hp->tdn, + mlx5e_rqt_get_rqtn(&hp->indir_rqt), + false); + mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false); - err = mlx5_core_create_tir(hp->func_mdev, in, - &hp->indir_tirn[tt]); + err = mlx5e_tir_init(&hp->indir_tir[tt], builder, hp->func_mdev, false); if (err) { mlx5_core_warn(hp->func_mdev, "create indirect tirs failed, %d\n", err); goto err_destroy_tirs; } + + mlx5e_tir_builder_clear(builder); } - return 0; -err_destroy_tirs: - for (i = 0; i < tt; i++) - mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[i]); +out: + mlx5e_tir_builder_free(builder); return err; + +err_destroy_tirs: + max_tt = tt; + for (tt = 0; tt < max_tt; tt++) + mlx5e_tir_destroy(&hp->indir_tir[tt]); + + goto out; } static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) @@ -609,7 +579,7 @@ static void mlx5e_hairpin_destroy_indirect_tirs(struct mlx5e_hairpin *hp) int tt; for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - mlx5_core_destroy_tir(hp->func_mdev, hp->indir_tirn[tt]); + mlx5e_tir_destroy(&hp->indir_tir[tt]); } static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, @@ -620,12 +590,16 @@ static void mlx5e_hairpin_set_ttc_params(struct mlx5e_hairpin *hp, memset(ttc_params, 0, sizeof(*ttc_params)); - ttc_params->any_tt_tirn = hp->tirn; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params->indir_tirn[tt] = hp->indir_tirn[tt]; + ttc_params->ns = mlx5_get_flow_namespace(hp->func_mdev, + MLX5_FLOW_NAMESPACE_KERNEL); + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + ttc_params->dests[tt].type = MLX5_FLOW_DESTINATION_TYPE_TIR; + ttc_params->dests[tt].tir_num = + tt == MLX5_TT_ANY ? 
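The hairpin rework above adopts the same TIR builder pattern used by the regular RX path. Condensed from the calls in this diff (a sketch, not a self-contained function), one TIR is set up roughly as:

	builder = mlx5e_tir_builder_alloc(false);
	mlx5e_tir_builder_build_rqt(builder, tdn, rqtn, false);  /* or _build_inline() for a single RQ */
	mlx5e_tir_builder_build_rss(builder, &rss_hash, &rss_tt, false);
	err = mlx5e_tir_init(&tir, builder, mdev, false);
	mlx5e_tir_builder_clear(builder);  /* reuse the builder for the next traffic type */
	mlx5e_tir_builder_free(builder);

with mlx5e_tir_destroy() as the teardown counterpart.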
+ mlx5e_tir_get_tirn(&hp->direct_tir) : + mlx5e_tir_get_tirn(&hp->indir_tir[tt]); + } - ft_attr->max_fte = MLX5E_TTC_TABLE_SIZE; ft_attr->level = MLX5E_TC_TTC_FT_LEVEL; ft_attr->prio = MLX5E_TC_PRIO; } @@ -645,30 +619,31 @@ static int mlx5e_hairpin_rss_init(struct mlx5e_hairpin *hp) goto err_create_indirect_tirs; mlx5e_hairpin_set_ttc_params(hp, &ttc_params); - err = mlx5e_create_ttc_table(priv, &ttc_params, &hp->ttc); - if (err) + hp->ttc = mlx5_create_ttc_table(priv->mdev, &ttc_params); + if (IS_ERR(hp->ttc)) { + err = PTR_ERR(hp->ttc); goto err_create_ttc_table; + } netdev_dbg(priv->netdev, "add hairpin: using %d channels rss ttc table id %x\n", - hp->num_channels, hp->ttc.ft.t->id); + hp->num_channels, + mlx5_get_ttc_flow_table(priv->fs.ttc)->id); return 0; err_create_ttc_table: mlx5e_hairpin_destroy_indirect_tirs(hp); err_create_indirect_tirs: - mlx5e_destroy_rqt(priv, &hp->indir_rqt); + mlx5e_rqt_destroy(&hp->indir_rqt); return err; } static void mlx5e_hairpin_rss_cleanup(struct mlx5e_hairpin *hp) { - struct mlx5e_priv *priv = hp->func_priv; - - mlx5e_destroy_ttc_table(priv, &hp->ttc); + mlx5_destroy_ttc_table(hp->ttc); mlx5e_hairpin_destroy_indirect_tirs(hp); - mlx5e_destroy_rqt(priv, &hp->indir_rqt); + mlx5e_rqt_destroy(&hp->indir_rqt); } static struct mlx5e_hairpin * @@ -903,16 +878,17 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, } netdev_dbg(priv->netdev, "add hairpin: tirn %x rqn %x peer %s sqn %x prio %d (log) data %d packets %d\n", - hp->tirn, hp->pair->rqn[0], + mlx5e_tir_get_tirn(&hp->direct_tir), hp->pair->rqn[0], dev_name(hp->pair->peer_mdev->device), hp->pair->sqn[0], match_prio, params.log_data_size, params.log_num_packets); attach_flow: if (hpe->hp->num_channels > 1) { flow_flag_set(flow, HAIRPIN_RSS); - flow->attr->nic_attr->hairpin_ft = hpe->hp->ttc.ft.t; + flow->attr->nic_attr->hairpin_ft = + mlx5_get_ttc_flow_table(hpe->hp->ttc); } else { - flow->attr->nic_attr->hairpin_tirn = hpe->hp->tirn; + flow->attr->nic_attr->hairpin_tirn = mlx5e_tir_get_tirn(&hpe->hp->direct_tir); } flow->hpe = hpe; @@ -1056,15 +1032,17 @@ err_ft_get: static int mlx5e_tc_add_nic_flow(struct mlx5e_priv *priv, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { + struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct mlx5_core_dev *dev = priv->mdev; - struct mlx5_fc *counter = NULL; + struct mlx5_fc *counter; int err; + parse_attr = attr->parse_attr; + if (flow_flag_test(flow, HAIRPIN)) { err = mlx5e_hairpin_flow_add(priv, flow, parse_attr, extack); if (err) @@ -1170,7 +1148,8 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, mod_hdr_acts); #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) } else if (flow_flag_test(flow, SAMPLE)) { - rule = mlx5_esw_sample_offload(get_sample_priv(flow->priv), spec, attr); + rule = mlx5e_tc_sample_offload(get_sample_priv(flow->priv), spec, attr, + mlx5e_tc_get_flow_tun_id(flow)); #endif } else { rule = mlx5_eswitch_add_offloaded_rule(esw, spec, attr); @@ -1209,7 +1188,7 @@ void mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) if (flow_flag_test(flow, SAMPLE)) { - mlx5_esw_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); + mlx5e_tc_sample_unoffload(get_sample_priv(flow->priv), flow->rule[0], attr); return; } #endif @@ -1338,6 +1317,7 @@ bool mlx5e_tc_is_vf_tunnel(struct net_device *out_dev, struct net_device *route_ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device 
*route_dev, u16 *vport) { struct mlx5e_priv *out_priv, *route_priv; + struct mlx5_devcom *devcom = NULL; struct mlx5_core_dev *route_mdev; struct mlx5_eswitch *esw; u16 vhca_id; @@ -1349,7 +1329,24 @@ int mlx5e_tc_query_route_vport(struct net_device *out_dev, struct net_device *ro route_mdev = route_priv->mdev; vhca_id = MLX5_CAP_GEN(route_mdev, vhca_id); + if (mlx5_lag_is_active(out_priv->mdev)) { + /* In lag case we may get devices from different eswitch instances. + * If we failed to get vport num, it means, mostly, that we on the wrong + * eswitch. + */ + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); + if (err != -ENOENT) + return err; + + devcom = out_priv->mdev->priv.devcom; + esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!esw) + return -ENODEV; + } + err = mlx5_eswitch_vhca_id_to_vport(esw, vhca_id, vport); + if (devcom) + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); return err; } @@ -1384,9 +1381,9 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, bool vf_tun = false, encap_valid = true; struct net_device *encap_dev = NULL; struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_fc *counter = NULL; struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; + struct mlx5_fc *counter; u32 max_prio, max_chain; int err = 0; int out_index; @@ -1573,6 +1570,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, else mlx5e_detach_mod_hdr(priv, flow); } + kfree(attr->sample_attr); kvfree(attr->parse_attr); kvfree(attr->esw_attr->rx_tun_attr); @@ -1582,7 +1580,6 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, if (flow_flag_test(flow, L3_TO_L2_DECAP)) mlx5e_detach_decap(priv, flow); - kfree(flow->attr->esw_attr->sample); kfree(flow->attr); } @@ -1647,17 +1644,22 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, } } -static int flow_has_tc_fwd_action(struct flow_cls_offload *f) +static bool flow_requires_tunnel_mapping(u32 chain, struct flow_cls_offload *f) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); struct flow_action *flow_action = &rule->action; const struct flow_action_entry *act; int i; + if (chain) + return false; + flow_action_for_each(i, act, flow_action) { switch (act->id) { case FLOW_ACTION_GOTO: return true; + case FLOW_ACTION_SAMPLE: + return true; default: continue; } @@ -1898,7 +1900,7 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, return -EOPNOTSUPP; needs_mapping = !!flow->attr->chain; - sets_mapping = !flow->attr->chain && flow_has_tc_fwd_action(f); + sets_mapping = flow_requires_tunnel_mapping(flow->attr->chain, f); *match_inner = !needs_mapping; if ((needs_mapping || sets_mapping) && @@ -2471,7 +2473,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, spec->match_criteria_enable |= MLX5_MATCH_MISC_PARAMETERS_3; } } - /* Currenlty supported only for MPLS over UDP */ + /* Currently supported only for MPLS over UDP */ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS) && !netif_is_bareudp(filter_dev)) { NL_SET_ERR_MSG_MOD(extack, @@ -2725,7 +2727,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, if (s_mask && a_mask) { NL_SET_ERR_MSG_MOD(extack, "can't set and add to the same HW field"); - printk(KERN_WARNING "mlx5: can't set and add to the same HW field (%x)\n", f->field); + netdev_warn(priv->netdev, + "mlx5: can't set and add to the same HW field (%x)\n", + f->field); return -EOPNOTSUPP; } @@ -2764,8 +2768,9 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, if (first < next_z && next_z < last) { NL_SET_ERR_MSG_MOD(extack, "rewrite of 
few sub-fields isn't supported"); - printk(KERN_WARNING "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", - mask); + netdev_warn(priv->netdev, + "mlx5: rewrite of few sub-fields (mask %lx) isn't offloaded\n", + mask); return -EOPNOTSUPP; } @@ -3352,10 +3357,10 @@ static int validate_goto_chain(struct mlx5e_priv *priv, static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, - struct mlx5e_tc_flow_parse_attr *parse_attr, struct mlx5e_tc_flow *flow, struct netlink_ext_ack *extack) { + struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct pedit_headers_action hdrs[2] = {}; const struct flow_action_entry *act; @@ -3371,8 +3376,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, return -EOPNOTSUPP; nic_attr = attr->nic_attr; - nic_attr->flow_tag = MLX5_FS_DEFAULT_FLOW_TAG; + parse_attr = attr->parse_attr; flow_action_for_each(i, act, flow_action) { switch (act->id) { @@ -3381,10 +3386,8 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, MLX5_FLOW_CONTEXT_ACTION_COUNT; break; case FLOW_ACTION_DROP: - action |= MLX5_FLOW_CONTEXT_ACTION_DROP; - if (MLX5_CAP_FLOWTABLE(priv->mdev, - flow_table_properties_nic_receive.flow_counter)) - action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; + action |= MLX5_FLOW_CONTEXT_ACTION_DROP | + MLX5_FLOW_CONTEXT_ACTION_COUNT; break; case FLOW_ACTION_MANGLE: case FLOW_ACTION_ADD: @@ -3425,7 +3428,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, "device is not on same HW, can't offload"); netdev_warn(priv->netdev, "device %s not on same HW, can't offload\n", peer_dev->name); - return -EINVAL; + return -EOPNOTSUPP; } } break; @@ -3435,7 +3438,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, if (mark & ~MLX5E_TC_FLOW_ID_MASK) { NL_SET_ERR_MSG_MOD(extack, "Bad flow mark - only 16 bit is supported"); - return -EINVAL; + return -EOPNOTSUPP; } nic_attr->flow_tag = mark; @@ -3732,20 +3735,19 @@ static int verify_uplink_forwarding(struct mlx5e_priv *priv, static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct flow_action *flow_action, struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack, - struct net_device *filter_dev) + struct netlink_ext_ack *extack) { struct pedit_headers_action hdrs[2] = {}; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5e_sample_attr sample_attr = {}; const struct ip_tunnel_info *info = NULL; struct mlx5_flow_attr *attr = flow->attr; int ifindexes[MLX5_MAX_FLOW_FWD_VPORTS]; bool ft_flow = mlx5e_is_ft_flow(flow); const struct flow_action_entry *act; struct mlx5_esw_flow_attr *esw_attr; - struct mlx5_sample_attr sample = {}; bool encap = false, decap = false; u32 action = attr->action; int err, i, if_count = 0; @@ -3798,7 +3800,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, "mpls pop supported only as first action"); return -EOPNOTSUPP; } - if (!netif_is_bareudp(filter_dev)) { + if (!netif_is_bareudp(parse_attr->filter_dev)) { NL_SET_ERR_MSG_MOD(extack, "mpls pop supported only on bareudp devices"); return -EOPNOTSUPP; @@ -3947,7 +3949,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, "devices %s %s not on same switch HW, can't offload forwarding\n", priv->netdev->name, out_dev->name); - return -EINVAL; + return -EOPNOTSUPP; } } break; @@ -4016,10 +4018,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, NL_SET_ERR_MSG_MOD(extack, "Sample action with connection tracking is 
not supported"); return -EOPNOTSUPP; } - sample.rate = act->sample.rate; - sample.group_num = act->sample.psample_group->group_num; + sample_attr.rate = act->sample.rate; + sample_attr.group_num = act->sample.psample_group->group_num; if (act->sample.truncate) - sample.trunc_size = act->sample.trunc_size; + sample_attr.trunc_size = act->sample.trunc_size; flow_flag_set(flow, SAMPLE); break; default: @@ -4104,10 +4106,10 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, * no errors after parsing. */ if (flow_flag_test(flow, SAMPLE)) { - esw_attr->sample = kzalloc(sizeof(*esw_attr->sample), GFP_KERNEL); - if (!esw_attr->sample) + attr->sample_attr = kzalloc(sizeof(*attr->sample_attr), GFP_KERNEL); + if (!attr->sample_attr) return -ENOMEM; - *esw_attr->sample = sample; + *attr->sample_attr = sample_attr; } return 0; @@ -4300,7 +4302,7 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_fdb_actions(priv, &rule->action, flow, extack, filter_dev); + err = parse_tc_fdb_actions(priv, &rule->action, flow, extack); if (err) goto err_free; @@ -4446,11 +4448,11 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto err_free; - err = parse_tc_nic_actions(priv, &rule->action, parse_attr, flow, extack); + err = parse_tc_nic_actions(priv, &rule->action, flow, extack); if (err) goto err_free; - err = mlx5e_tc_add_nic_flow(priv, parse_attr, flow, extack); + err = mlx5e_tc_add_nic_flow(priv, flow, extack); if (err) goto err_free; @@ -4705,7 +4707,7 @@ static int apply_police_params(struct mlx5e_priv *priv, u64 rate, rate_mbps = max_t(u32, rate, 1); } - err = mlx5_esw_modify_vport_rate(esw, vport_num, rate_mbps); + err = mlx5_esw_qos_modify_vport_rate(esw, vport_num, rate_mbps); if (err) NL_SET_ERR_MSG_MOD(extack, "failed applying action to hardware"); @@ -4877,6 +4879,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) struct mlx5_core_dev *dev = priv->mdev; struct mapping_ctx *chains_mapping; struct mlx5_chains_attr attr = {}; + u64 mapping_id; int err; mlx5e_mod_hdr_tbl_init(&tc->mod_hdr); @@ -4890,8 +4893,12 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) lockdep_set_class(&tc->ht.mutex, &tc_ht_lock_key); - chains_mapping = mapping_create(sizeof(struct mlx5_mapped_obj), - MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + mapping_id = mlx5_query_nic_system_image_guid(dev); + + chains_mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + sizeof(struct mlx5_mapped_obj), + MLX5E_TC_TABLE_CHAIN_TAG_MASK, true); + if (IS_ERR(chains_mapping)) { err = PTR_ERR(chains_mapping); goto err_mapping; @@ -4913,8 +4920,9 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) goto err_chains; } + tc->post_act = mlx5e_tc_post_act_init(priv, tc->chains, MLX5_FLOW_NAMESPACE_KERNEL); tc->ct = mlx5_tc_ct_init(priv, tc->chains, &priv->fs.tc.mod_hdr, - MLX5_FLOW_NAMESPACE_KERNEL); + MLX5_FLOW_NAMESPACE_KERNEL, tc->post_act); tc->netdevice_nb.notifier_call = mlx5e_tc_netdev_event; err = register_netdevice_notifier_dev_net(priv->netdev, @@ -4930,6 +4938,7 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) err_reg: mlx5_tc_ct_clean(tc->ct); + mlx5e_tc_post_act_destroy(tc->post_act); mlx5_chains_destroy(tc->chains); err_chains: mapping_destroy(chains_mapping); @@ -4968,6 +4977,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) mutex_destroy(&tc->t_lock); mlx5_tc_ct_clean(tc->ct); + mlx5e_tc_post_act_destroy(tc->post_act); mapping_destroy(tc->mapping); mlx5_chains_destroy(tc->chains); } @@ -4980,6 +4990,7 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) struct mapping_ctx 
*mapping; struct mlx5_eswitch *esw; struct mlx5e_priv *priv; + u64 mapping_id; int err = 0; uplink_priv = container_of(tc_ht, struct mlx5_rep_uplink_priv, tc_ht); @@ -4987,17 +4998,24 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) priv = netdev_priv(rpriv->netdev); esw = priv->mdev->priv.eswitch; + uplink_priv->post_act = mlx5e_tc_post_act_init(priv, esw_chains(esw), + MLX5_FLOW_NAMESPACE_FDB); uplink_priv->ct_priv = mlx5_tc_ct_init(netdev_priv(priv->netdev), esw_chains(esw), &esw->offloads.mod_hdr, - MLX5_FLOW_NAMESPACE_FDB); + MLX5_FLOW_NAMESPACE_FDB, + uplink_priv->post_act); #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - uplink_priv->esw_psample = mlx5_esw_sample_init(netdev_priv(priv->netdev)); + uplink_priv->tc_psample = mlx5e_tc_sample_init(esw, uplink_priv->post_act); #endif - mapping = mapping_create(sizeof(struct tunnel_match_key), - TUNNEL_INFO_BITS_MASK, true); + mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + + mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL, + sizeof(struct tunnel_match_key), + TUNNEL_INFO_BITS_MASK, true); + if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_tun_mapping; @@ -5005,7 +5023,8 @@ int mlx5e_tc_esw_init(struct rhashtable *tc_ht) uplink_priv->tunnel_mapping = mapping; /* 0xFFF is reserved for stack devices slow path table mark */ - mapping = mapping_create(sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); + mapping = mapping_create_for_id(mapping_id, MAPPING_TYPE_TUNNEL_ENC_OPTS, + sz_enc_opts, ENC_OPTS_BITS_MASK - 1, true); if (IS_ERR(mapping)) { err = PTR_ERR(mapping); goto err_enc_opts_mapping; @@ -5034,11 +5053,12 @@ err_enc_opts_mapping: mapping_destroy(uplink_priv->tunnel_mapping); err_tun_mapping: #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - mlx5_esw_sample_cleanup(uplink_priv->esw_psample); + mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); #endif mlx5_tc_ct_clean(uplink_priv->ct_priv); netdev_warn(priv->netdev, "Failed to initialize tc (eswitch), err: %d", err); + mlx5e_tc_post_act_destroy(uplink_priv->post_act); return err; } @@ -5055,9 +5075,10 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) mapping_destroy(uplink_priv->tunnel_mapping); #if IS_ENABLED(CONFIG_MLX5_TC_SAMPLE) - mlx5_esw_sample_cleanup(uplink_priv->esw_psample); + mlx5e_tc_sample_cleanup(uplink_priv->tc_psample); #endif mlx5_tc_ct_clean(uplink_priv->ct_priv); + mlx5e_tc_post_act_destroy(uplink_priv->post_act); } int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index f7cbeb0b66d2..1a4cd882f0fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -70,6 +70,7 @@ struct mlx5_flow_attr { struct mlx5_fc *counter; struct mlx5_modify_hdr *modify_hdr; struct mlx5_ct_attr ct_attr; + struct mlx5e_sample_attr *sample_attr; struct mlx5e_tc_flow_parse_attr *parse_attr; u32 chain; u16 prio; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c index 505bf811984a..2e504c7461c6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/egress_ofld.c @@ -15,6 +15,15 @@ static void esw_acl_egress_ofld_fwd2vport_destroy(struct mlx5_vport *vport) vport->egress.offloads.fwd_rule = NULL; } +static void esw_acl_egress_ofld_bounce_rule_destroy(struct mlx5_vport *vport) +{ + if (!vport->egress.offloads.bounce_rule) + return; + 
+ mlx5_del_flow_rules(vport->egress.offloads.bounce_rule); + vport->egress.offloads.bounce_rule = NULL; +} + static int esw_acl_egress_ofld_fwd2vport_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport, struct mlx5_flow_destination *fwd_dest) @@ -87,6 +96,7 @@ static void esw_acl_egress_ofld_rules_destroy(struct mlx5_vport *vport) { esw_acl_egress_vlan_destroy(vport); esw_acl_egress_ofld_fwd2vport_destroy(vport); + esw_acl_egress_ofld_bounce_rule_destroy(vport); } static int esw_acl_egress_ofld_groups_create(struct mlx5_eswitch *esw, @@ -145,6 +155,12 @@ static void esw_acl_egress_ofld_groups_destroy(struct mlx5_vport *vport) mlx5_destroy_flow_group(vport->egress.offloads.fwd_grp); vport->egress.offloads.fwd_grp = NULL; } + + if (!IS_ERR_OR_NULL(vport->egress.offloads.bounce_grp)) { + mlx5_destroy_flow_group(vport->egress.offloads.bounce_grp); + vport->egress.offloads.bounce_grp = NULL; + } + esw_acl_egress_vlan_grp_destroy(vport); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index 69a3630818d7..7e221038df8d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -5,6 +5,7 @@ #include <linux/notifier.h> #include <net/netevent.h> #include <net/switchdev.h> +#include "lib/devcom.h" #include "bridge.h" #include "eswitch.h" #include "bridge_priv.h" @@ -56,7 +57,6 @@ struct mlx5_esw_bridge { struct list_head fdb_list; struct rhashtable fdb_ht; - struct xarray vports; struct mlx5_flow_table *egress_ft; struct mlx5_flow_group *egress_vlan_fg; @@ -77,6 +77,15 @@ mlx5_esw_bridge_fdb_offload_notify(struct net_device *dev, const unsigned char * call_switchdev_notifiers(val, dev, &send_info.info, NULL); } +static void +mlx5_esw_bridge_fdb_del_notify(struct mlx5_esw_bridge_fdb_entry *entry) +{ + if (!(entry->flags & (MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER | MLX5_ESW_BRIDGE_FLAG_PEER))) + mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, + entry->key.vid, + SWITCHDEV_FDB_DEL_TO_BRIDGE); +} + static struct mlx5_flow_table * mlx5_esw_bridge_table_create(int max_fte, u32 level, struct mlx5_eswitch *esw) { @@ -400,9 +409,10 @@ mlx5_esw_bridge_egress_table_cleanup(struct mlx5_esw_bridge *bridge) } static struct mlx5_flow_handle * -mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, - struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, - struct mlx5_esw_bridge *bridge) +mlx5_esw_bridge_ingress_flow_with_esw_create(u16 vport_num, const unsigned char *addr, + struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge *bridge, + struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_offloads *br_offloads = bridge->br_offloads; struct mlx5_flow_act flow_act = { @@ -430,7 +440,7 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, MLX5_SET(fte_match_param, rule_spec->match_criteria, misc_parameters_2.metadata_reg_c_0, mlx5_eswitch_get_vport_metadata_mask()); MLX5_SET(fte_match_param, rule_spec->match_value, misc_parameters_2.metadata_reg_c_0, - mlx5_eswitch_get_vport_metadata_for_match(br_offloads->esw, vport_num)); + mlx5_eswitch_get_vport_metadata_for_match(esw, vport_num)); if (vlan && vlan->pkt_reformat_push) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; @@ -459,6 +469,35 @@ mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, } static struct mlx5_flow_handle * +mlx5_esw_bridge_ingress_flow_create(u16 vport_num, const unsigned char *addr, + 
struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge *bridge) +{ + return mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + bridge, bridge->br_offloads->esw); +} + +static struct mlx5_flow_handle * +mlx5_esw_bridge_ingress_flow_peer_create(u16 vport_num, const unsigned char *addr, + struct mlx5_esw_bridge_vlan *vlan, u32 counter_id, + struct mlx5_esw_bridge *bridge) +{ + struct mlx5_devcom *devcom = bridge->br_offloads->esw->dev->priv.devcom; + static struct mlx5_flow_handle *handle; + struct mlx5_eswitch *peer_esw; + + peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!peer_esw) + return ERR_PTR(-ENODEV); + + handle = mlx5_esw_bridge_ingress_flow_with_esw_create(vport_num, addr, vlan, counter_id, + bridge, peer_esw); + + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return handle; +} + +static struct mlx5_flow_handle * mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *addr, struct mlx5_esw_bridge *bridge) { @@ -505,7 +544,7 @@ mlx5_esw_bridge_ingress_filter_flow_create(u16 vport_num, const unsigned char *a } static struct mlx5_flow_handle * -mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr, +mlx5_esw_bridge_egress_flow_create(u16 vport_num, u16 esw_owner_vhca_id, const unsigned char *addr, struct mlx5_esw_bridge_vlan *vlan, struct mlx5_esw_bridge *bridge) { @@ -550,6 +589,10 @@ mlx5_esw_bridge_egress_flow_create(u16 vport_num, const unsigned char *addr, vlan->vid); } + if (MLX5_CAP_ESW(bridge->br_offloads->esw->dev, merged_eswitch)) { + dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; + dest.vport.vhca_id = esw_owner_vhca_id; + } handle = mlx5_add_flow_rules(bridge->egress_ft, rule_spec, &flow_act, &dest, 1); kvfree(rule_spec); @@ -576,7 +619,6 @@ static struct mlx5_esw_bridge *mlx5_esw_bridge_create(int ifindex, goto err_fdb_ht; INIT_LIST_HEAD(&bridge->fdb_list); - xa_init(&bridge->vports); bridge->ifindex = ifindex; bridge->refcnt = 1; bridge->ageing_time = clock_t_to_jiffies(BR_DEFAULT_AGEING_TIME); @@ -603,7 +645,6 @@ static void mlx5_esw_bridge_put(struct mlx5_esw_bridge_offloads *br_offloads, return; mlx5_esw_bridge_egress_table_cleanup(bridge); - WARN_ON(!xa_empty(&bridge->vports)); list_del(&bridge->list); rhashtable_destroy(&bridge->fdb_ht); kvfree(bridge); @@ -639,30 +680,40 @@ mlx5_esw_bridge_lookup(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads return bridge; } +static unsigned long mlx5_esw_bridge_port_key_from_data(u16 vport_num, u16 esw_owner_vhca_id) +{ + return vport_num | (unsigned long)esw_owner_vhca_id << sizeof(vport_num) * BITS_PER_BYTE; +} + +static unsigned long mlx5_esw_bridge_port_key(struct mlx5_esw_bridge_port *port) +{ + return mlx5_esw_bridge_port_key_from_data(port->vport_num, port->esw_owner_vhca_id); +} + static int mlx5_esw_bridge_port_insert(struct mlx5_esw_bridge_port *port, - struct mlx5_esw_bridge *bridge) + struct mlx5_esw_bridge_offloads *br_offloads) { - return xa_insert(&bridge->vports, port->vport_num, port, GFP_KERNEL); + return xa_insert(&br_offloads->ports, mlx5_esw_bridge_port_key(port), port, GFP_KERNEL); } static struct mlx5_esw_bridge_port * -mlx5_esw_bridge_port_lookup(u16 vport_num, struct mlx5_esw_bridge *bridge) +mlx5_esw_bridge_port_lookup(u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads) { - return xa_load(&bridge->vports, vport_num); + return xa_load(&br_offloads->ports, mlx5_esw_bridge_port_key_from_data(vport_num, + 
esw_owner_vhca_id)); } static void mlx5_esw_bridge_port_erase(struct mlx5_esw_bridge_port *port, - struct mlx5_esw_bridge *bridge) + struct mlx5_esw_bridge_offloads *br_offloads) { - xa_erase(&bridge->vports, port->vport_num); + xa_erase(&br_offloads->ports, mlx5_esw_bridge_port_key(port)); } -static void mlx5_esw_bridge_fdb_entry_refresh(unsigned long lastuse, - struct mlx5_esw_bridge_fdb_entry *entry) +static void mlx5_esw_bridge_fdb_entry_refresh(struct mlx5_esw_bridge_fdb_entry *entry) { trace_mlx5_esw_bridge_fdb_entry_refresh(entry); - entry->lastuse = lastuse; mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, entry->key.vid, SWITCHDEV_FDB_ADD_TO_BRIDGE); @@ -690,10 +741,7 @@ static void mlx5_esw_bridge_fdb_flush(struct mlx5_esw_bridge *bridge) struct mlx5_esw_bridge_fdb_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) { - if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)) - mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, - entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } } @@ -841,10 +889,7 @@ static void mlx5_esw_bridge_vlan_flush(struct mlx5_esw_bridge_vlan *vlan, struct mlx5_esw_bridge_fdb_entry *entry, *tmp; list_for_each_entry_safe(entry, tmp, &vlan->fdb_list, vlan_list) { - if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)) - mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, - entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } @@ -875,13 +920,13 @@ static void mlx5_esw_bridge_port_vlans_flush(struct mlx5_esw_bridge_port *port, } static struct mlx5_esw_bridge_vlan * -mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge *bridge, - struct mlx5_eswitch *esw) +mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge *bridge, struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge_vlan *vlan; - port = mlx5_esw_bridge_port_lookup(vport_num, bridge); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, bridge->br_offloads); if (!port) { /* FDB is added asynchronously on wq while port might have been deleted * concurrently. Report on 'info' logging level and skip the FDB offload. 
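The hunks above key the new br_offloads->ports xarray by both the vport number and the owning eswitch's vhca id, so ports that belong to a peer (merged/LAG) eswitch can live in the same table as local ones. Below is a minimal stand-alone C sketch of that key layout; it is an illustration only, not driver code, and the helper name bridge_port_key is invented here to mirror mlx5_esw_bridge_port_key_from_data from the patch.

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

/* Pack the 16-bit vport number into the low bits and the 16-bit
 * esw_owner_vhca_id into the next 16 bits, matching the xarray index
 * layout used by mlx5_esw_bridge_port_key_from_data() in the patch. */
static unsigned long bridge_port_key(uint16_t vport_num, uint16_t esw_owner_vhca_id)
{
	return (unsigned long)vport_num |
	       ((unsigned long)esw_owner_vhca_id << (sizeof(vport_num) * CHAR_BIT));
}

int main(void)
{
	/* The same vport number owned by two different eswitches yields two
	 * distinct keys, so peer ports no longer collide in a single table.
	 * The numeric values here are arbitrary examples. */
	printf("key(vport=5, vhca_id=0x11) = 0x%lx\n", bridge_port_key(5, 0x11));
	printf("key(vport=5, vhca_id=0x22) = 0x%lx\n", bridge_port_key(5, 0x22));
	return 0;
}

This is also why the lookup, insert, and erase helpers in the hunks above now take br_offloads rather than a single bridge: the key, not the bridge instance, disambiguates which eswitch owns the port.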
@@ -904,24 +949,23 @@ mlx5_esw_bridge_port_vlan_lookup(u16 vid, u16 vport_num, struct mlx5_esw_bridge } static struct mlx5_esw_bridge_fdb_entry * -mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsigned char *addr, - u16 vid, bool added_by_user, struct mlx5_eswitch *esw, - struct mlx5_esw_bridge *bridge) +mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + const unsigned char *addr, u16 vid, bool added_by_user, bool peer, + struct mlx5_eswitch *esw, struct mlx5_esw_bridge *bridge) { struct mlx5_esw_bridge_vlan *vlan = NULL; struct mlx5_esw_bridge_fdb_entry *entry; struct mlx5_flow_handle *handle; struct mlx5_fc *counter; - struct mlx5e_priv *priv; int err; if (bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG && vid) { - vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, bridge, esw); + vlan = mlx5_esw_bridge_port_vlan_lookup(vid, vport_num, esw_owner_vhca_id, bridge, + esw); if (IS_ERR(vlan)) return ERR_CAST(vlan); } - priv = netdev_priv(dev); entry = kvzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) return ERR_PTR(-ENOMEM); @@ -930,19 +974,25 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsi entry->key.vid = vid; entry->dev = dev; entry->vport_num = vport_num; + entry->esw_owner_vhca_id = esw_owner_vhca_id; entry->lastuse = jiffies; if (added_by_user) entry->flags |= MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER; + if (peer) + entry->flags |= MLX5_ESW_BRIDGE_FLAG_PEER; - counter = mlx5_fc_create(priv->mdev, true); + counter = mlx5_fc_create(esw->dev, true); if (IS_ERR(counter)) { err = PTR_ERR(counter); goto err_ingress_fc_create; } entry->ingress_counter = counter; - handle = mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, mlx5_fc_id(counter), - bridge); + handle = peer ? 
+ mlx5_esw_bridge_ingress_flow_peer_create(vport_num, addr, vlan, + mlx5_fc_id(counter), bridge) : + mlx5_esw_bridge_ingress_flow_create(vport_num, addr, vlan, + mlx5_fc_id(counter), bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); esw_warn(esw->dev, "Failed to create ingress flow(vport=%u,err=%d)\n", @@ -962,7 +1012,8 @@ mlx5_esw_bridge_fdb_entry_init(struct net_device *dev, u16 vport_num, const unsi entry->filter_handle = handle; } - handle = mlx5_esw_bridge_egress_flow_create(vport_num, addr, vlan, bridge); + handle = mlx5_esw_bridge_egress_flow_create(vport_num, esw_owner_vhca_id, addr, vlan, + bridge); if (IS_ERR(handle)) { err = PTR_ERR(handle); esw_warn(esw->dev, "Failed to create egress flow(vport=%u,err=%d)\n", @@ -994,32 +1045,37 @@ err_egress_flow_create: err_ingress_filter_flow_create: mlx5_del_flow_rules(entry->ingress_handle); err_ingress_flow_create: - mlx5_fc_destroy(priv->mdev, entry->ingress_counter); + mlx5_fc_destroy(esw->dev, entry->ingress_counter); err_ingress_fc_create: kvfree(entry); return ERR_PTR(err); } -int mlx5_esw_bridge_ageing_time_set(unsigned long ageing_time, struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time, + struct mlx5_esw_bridge_offloads *br_offloads) { - if (!vport->bridge) + struct mlx5_esw_bridge_port *port; + + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) return -EINVAL; - vport->bridge->ageing_time = clock_t_to_jiffies(ageing_time); + port->bridge->ageing_time = clock_t_to_jiffies(ageing_time); return 0; } -int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw, - struct mlx5_vport *vport) +int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable, + struct mlx5_esw_bridge_offloads *br_offloads) { + struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge *bridge; bool filtering; - if (!vport->bridge) + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) return -EINVAL; - bridge = vport->bridge; + bridge = port->bridge; filtering = bridge->flags & MLX5_ESW_BRIDGE_VLAN_FILTERING_FLAG; if (filtering == enable) return 0; @@ -1033,114 +1089,143 @@ int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw, return 0; } -static int mlx5_esw_bridge_vport_init(struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_esw_bridge *bridge, - struct mlx5_vport *vport) +static int mlx5_esw_bridge_vport_init(u16 vport_num, u16 esw_owner_vhca_id, u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct mlx5_esw_bridge *bridge) { struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_esw_bridge_port *port; int err; port = kvzalloc(sizeof(*port), GFP_KERNEL); - if (!port) { - err = -ENOMEM; - goto err_port_alloc; - } + if (!port) + return -ENOMEM; - port->vport_num = vport->vport; + port->vport_num = vport_num; + port->esw_owner_vhca_id = esw_owner_vhca_id; + port->bridge = bridge; + port->flags |= flags; xa_init(&port->vlans); - err = mlx5_esw_bridge_port_insert(port, bridge); + err = mlx5_esw_bridge_port_insert(port, br_offloads); if (err) { - esw_warn(esw->dev, "Failed to insert port metadata (vport=%u,err=%d)\n", - vport->vport, err); + esw_warn(esw->dev, + "Failed to insert port metadata (vport=%u,esw_owner_vhca_id=%u,err=%d)\n", + port->vport_num, port->esw_owner_vhca_id, err); goto err_port_insert; } trace_mlx5_esw_bridge_vport_init(port); - vport->bridge = bridge; return 0; 
err_port_insert: kvfree(port); -err_port_alloc: - mlx5_esw_bridge_put(br_offloads, bridge); return err; } static int mlx5_esw_bridge_vport_cleanup(struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport) + struct mlx5_esw_bridge_port *port) { - struct mlx5_esw_bridge *bridge = vport->bridge; + u16 vport_num = port->vport_num, esw_owner_vhca_id = port->esw_owner_vhca_id; + struct mlx5_esw_bridge *bridge = port->bridge; struct mlx5_esw_bridge_fdb_entry *entry, *tmp; - struct mlx5_esw_bridge_port *port; list_for_each_entry_safe(entry, tmp, &bridge->fdb_list, list) - if (entry->vport_num == vport->vport) + if (entry->vport_num == vport_num && entry->esw_owner_vhca_id == esw_owner_vhca_id) mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); - port = mlx5_esw_bridge_port_lookup(vport->vport, bridge); - if (!port) { - WARN(1, "Vport %u metadata not found on bridge", vport->vport); - return -EINVAL; - } - trace_mlx5_esw_bridge_vport_cleanup(port); mlx5_esw_bridge_port_vlans_flush(port, bridge); - mlx5_esw_bridge_port_erase(port, bridge); + mlx5_esw_bridge_port_erase(port, br_offloads); kvfree(port); mlx5_esw_bridge_put(br_offloads, bridge); - vport->bridge = NULL; return 0; } -int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack) +static int mlx5_esw_bridge_vport_link_with_flags(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) { struct mlx5_esw_bridge *bridge; int err; - WARN_ON(vport->bridge); - bridge = mlx5_esw_bridge_lookup(ifindex, br_offloads); if (IS_ERR(bridge)) { NL_SET_ERR_MSG_MOD(extack, "Error checking for existing bridge with same ifindex"); return PTR_ERR(bridge); } - err = mlx5_esw_bridge_vport_init(br_offloads, bridge, vport); - if (err) + err = mlx5_esw_bridge_vport_init(vport_num, esw_owner_vhca_id, flags, br_offloads, bridge); + if (err) { NL_SET_ERR_MSG_MOD(extack, "Error initializing port"); + goto err_vport; + } + return 0; + +err_vport: + mlx5_esw_bridge_put(br_offloads, bridge); return err; } -int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack) +int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) { - struct mlx5_esw_bridge *bridge = vport->bridge; + return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, 0, + br_offloads, extack); +} + +int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_bridge_port *port; int err; - if (!bridge) { + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) { NL_SET_ERR_MSG_MOD(extack, "Port is not attached to any bridge"); return -EINVAL; } - if (bridge->ifindex != ifindex) { + if (port->bridge->ifindex != ifindex) { NL_SET_ERR_MSG_MOD(extack, "Port is attached to another bridge"); return -EINVAL; } - err = mlx5_esw_bridge_vport_cleanup(br_offloads, vport); + err = mlx5_esw_bridge_vport_cleanup(br_offloads, port); if (err) NL_SET_ERR_MSG_MOD(extack, "Port cleanup failed"); return err; } -int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, struct netlink_ext_ack *extack) +int 
mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) +{ + if (!MLX5_CAP_ESW(br_offloads->esw->dev, merged_eswitch)) + return 0; + + return mlx5_esw_bridge_vport_link_with_flags(ifindex, vport_num, esw_owner_vhca_id, + MLX5_ESW_BRIDGE_PORT_FLAG_PEER, + br_offloads, extack); +} + +int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) +{ + return mlx5_esw_bridge_vport_unlink(ifindex, vport_num, esw_owner_vhca_id, br_offloads, + extack); +} + +int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack) { struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge_vlan *vlan; - port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); if (!port) return -EINVAL; @@ -1148,10 +1233,10 @@ int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, if (vlan) { if (vlan->flags == flags) return 0; - mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge); + mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge); } - vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, esw); + vlan = mlx5_esw_bridge_vlan_create(vid, flags, port, br_offloads->esw); if (IS_ERR(vlan)) { NL_SET_ERR_MSG_MOD(extack, "Failed to create VLAN entry"); return PTR_ERR(vlan); @@ -1159,62 +1244,93 @@ int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, return 0; } -void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport) +void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, + struct mlx5_esw_bridge_offloads *br_offloads) { struct mlx5_esw_bridge_port *port; struct mlx5_esw_bridge_vlan *vlan; - port = mlx5_esw_bridge_port_lookup(vport->vport, vport->bridge); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); if (!port) return; vlan = mlx5_esw_bridge_vlan_lookup(vid, port); if (!vlan) return; - mlx5_esw_bridge_vlan_cleanup(port, vlan, vport->bridge); + mlx5_esw_bridge_vlan_cleanup(port, vlan, port->bridge); } -void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - struct switchdev_notifier_fdb_info *fdb_info) +void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info) { - struct mlx5_esw_bridge *bridge = vport->bridge; struct mlx5_esw_bridge_fdb_entry *entry; - u16 vport_num = vport->vport; + struct mlx5_esw_bridge_fdb_key key; + struct mlx5_esw_bridge_port *port; + struct mlx5_esw_bridge *bridge; - if (!bridge) { - esw_info(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port || port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER) + return; + + bridge = port->bridge; + ether_addr_copy(key.addr, fdb_info->addr); + key.vid = fdb_info->vid; + entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); + if (!entry) { + esw_debug(br_offloads->esw->dev, + "FDB entry with specified key not found (MAC=%pM,vid=%u,vport=%u)\n", + key.addr, key.vid, vport_num); return; } - entry = 
mlx5_esw_bridge_fdb_entry_init(dev, vport_num, fdb_info->addr, fdb_info->vid, - fdb_info->added_by_user, esw, bridge); + entry->lastuse = jiffies; +} + +void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info) +{ + struct mlx5_esw_bridge_fdb_entry *entry; + struct mlx5_esw_bridge_port *port; + struct mlx5_esw_bridge *bridge; + + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) + return; + + bridge = port->bridge; + entry = mlx5_esw_bridge_fdb_entry_init(dev, vport_num, esw_owner_vhca_id, fdb_info->addr, + fdb_info->vid, fdb_info->added_by_user, + port->flags & MLX5_ESW_BRIDGE_PORT_FLAG_PEER, + br_offloads->esw, bridge); if (IS_ERR(entry)) return; if (entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER) mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid, SWITCHDEV_FDB_OFFLOADED); - else + else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER)) /* Take over dynamic entries to prevent kernel bridge from aging them out. */ mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid, SWITCHDEV_FDB_ADD_TO_BRIDGE); } -void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, +void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info) { - struct mlx5_esw_bridge *bridge = vport->bridge; + struct mlx5_eswitch *esw = br_offloads->esw; struct mlx5_esw_bridge_fdb_entry *entry; struct mlx5_esw_bridge_fdb_key key; - u16 vport_num = vport->vport; + struct mlx5_esw_bridge_port *port; + struct mlx5_esw_bridge *bridge; - if (!bridge) { - esw_warn(esw->dev, "Vport is not assigned to bridge (vport=%u)\n", vport_num); + port = mlx5_esw_bridge_port_lookup(vport_num, esw_owner_vhca_id, br_offloads); + if (!port) return; - } + bridge = port->bridge; ether_addr_copy(key.addr, fdb_info->addr); key.vid = fdb_info->vid; entry = rhashtable_lookup_fast(&bridge->fdb_ht, &key, fdb_ht_params); @@ -1225,9 +1341,7 @@ void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw return; } - if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER)) - mlx5_esw_bridge_fdb_offload_notify(dev, entry->key.addr, entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } @@ -1245,11 +1359,10 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) continue; if (time_after(lastuse, entry->lastuse)) { - mlx5_esw_bridge_fdb_entry_refresh(lastuse, entry); - } else if (time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) { - mlx5_esw_bridge_fdb_offload_notify(entry->dev, entry->key.addr, - entry->key.vid, - SWITCHDEV_FDB_DEL_TO_BRIDGE); + mlx5_esw_bridge_fdb_entry_refresh(entry); + } else if (!(entry->flags & MLX5_ESW_BRIDGE_FLAG_PEER) && + time_is_before_jiffies(entry->lastuse + bridge->ageing_time)) { + mlx5_esw_bridge_fdb_del_notify(entry); mlx5_esw_bridge_fdb_entry_cleanup(entry, bridge); } } @@ -1258,13 +1371,11 @@ void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads) static void mlx5_esw_bridge_flush(struct mlx5_esw_bridge_offloads *br_offloads) { - struct mlx5_eswitch *esw = br_offloads->esw; - struct mlx5_vport *vport; + struct mlx5_esw_bridge_port *port; unsigned long i; - mlx5_esw_for_each_vport(esw, i, 
vport) - if (vport->bridge) - mlx5_esw_bridge_vport_cleanup(br_offloads, vport); + xa_for_each(&br_offloads->ports, i, port) + mlx5_esw_bridge_vport_cleanup(br_offloads, port); WARN_ONCE(!list_empty(&br_offloads->bridges), "Cleaning up bridge offloads while still having bridges attached\n"); @@ -1279,6 +1390,7 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&br_offloads->bridges); + xa_init(&br_offloads->ports); br_offloads->esw = esw; esw->br_offloads = br_offloads; @@ -1293,6 +1405,7 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw) return; mlx5_esw_bridge_flush(br_offloads); + WARN_ON(!xa_empty(&br_offloads->ports)); esw->br_offloads = NULL; kvfree(br_offloads); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h index d826942b27fc..efc39975226e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.h @@ -7,6 +7,7 @@ #include <linux/notifier.h> #include <linux/list.h> #include <linux/workqueue.h> +#include <linux/xarray.h> #include "eswitch.h" struct mlx5_flow_table; @@ -15,6 +16,8 @@ struct mlx5_flow_group; struct mlx5_esw_bridge_offloads { struct mlx5_eswitch *esw; struct list_head bridges; + struct xarray ports; + struct notifier_block netdev_nb; struct notifier_block nb_blk; struct notifier_block nb; @@ -31,23 +34,36 @@ struct mlx5_esw_bridge_offloads { struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw); void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw); -int mlx5_esw_bridge_vport_link(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack); -int mlx5_esw_bridge_vport_unlink(int ifindex, struct mlx5_esw_bridge_offloads *br_offloads, - struct mlx5_vport *vport, struct netlink_ext_ack *extack); -void mlx5_esw_bridge_fdb_create(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, +int mlx5_esw_bridge_vport_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +int mlx5_esw_bridge_vport_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +int mlx5_esw_bridge_vport_peer_link(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +int mlx5_esw_bridge_vport_peer_unlink(int ifindex, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +void mlx5_esw_bridge_fdb_update_used(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, + struct switchdev_notifier_fdb_info *fdb_info); +void mlx5_esw_bridge_fdb_create(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info); -void mlx5_esw_bridge_fdb_remove(struct net_device *dev, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, +void mlx5_esw_bridge_fdb_remove(struct net_device *dev, u16 vport_num, u16 esw_owner_vhca_id, + struct mlx5_esw_bridge_offloads *br_offloads, struct switchdev_notifier_fdb_info *fdb_info); void mlx5_esw_bridge_update(struct mlx5_esw_bridge_offloads *br_offloads); -int mlx5_esw_bridge_ageing_time_set(unsigned long 
ageing_time, struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -int mlx5_esw_bridge_vlan_filtering_set(bool enable, struct mlx5_eswitch *esw, - struct mlx5_vport *vport); -int mlx5_esw_bridge_port_vlan_add(u16 vid, u16 flags, struct mlx5_eswitch *esw, - struct mlx5_vport *vport, struct netlink_ext_ack *extack); -void mlx5_esw_bridge_port_vlan_del(u16 vid, struct mlx5_eswitch *esw, struct mlx5_vport *vport); +int mlx5_esw_bridge_ageing_time_set(u16 vport_num, u16 esw_owner_vhca_id, unsigned long ageing_time, + struct mlx5_esw_bridge_offloads *br_offloads); +int mlx5_esw_bridge_vlan_filtering_set(u16 vport_num, u16 esw_owner_vhca_id, bool enable, + struct mlx5_esw_bridge_offloads *br_offloads); +int mlx5_esw_bridge_port_vlan_add(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, u16 flags, + struct mlx5_esw_bridge_offloads *br_offloads, + struct netlink_ext_ack *extack); +void mlx5_esw_bridge_port_vlan_del(u16 vport_num, u16 esw_owner_vhca_id, u16 vid, + struct mlx5_esw_bridge_offloads *br_offloads); #endif /* __MLX5_ESW_BRIDGE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h index d9ab2e8bc2cb..52964a82d6a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_priv.h @@ -19,6 +19,11 @@ struct mlx5_esw_bridge_fdb_key { enum { MLX5_ESW_BRIDGE_FLAG_ADDED_BY_USER = BIT(0), + MLX5_ESW_BRIDGE_FLAG_PEER = BIT(1), +}; + +enum { + MLX5_ESW_BRIDGE_PORT_FLAG_PEER = BIT(0), }; struct mlx5_esw_bridge_fdb_entry { @@ -28,6 +33,7 @@ struct mlx5_esw_bridge_fdb_entry { struct list_head list; struct list_head vlan_list; u16 vport_num; + u16 esw_owner_vhca_id; u16 flags; struct mlx5_flow_handle *ingress_handle; @@ -47,6 +53,9 @@ struct mlx5_esw_bridge_vlan { struct mlx5_esw_bridge_port { u16 vport_num; + u16 esw_owner_vhca_id; + u16 flags; + struct mlx5_esw_bridge *bridge; struct xarray vlans; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c index 1703384eca95..20af557ae30c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/devlink_port.c @@ -91,9 +91,15 @@ int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, u16 vport_ if (err) goto reg_err; + err = devlink_rate_leaf_create(dl_port, vport); + if (err) + goto rate_err; + vport->dl_port = dl_port; return 0; +rate_err: + devlink_port_unregister(dl_port); reg_err: mlx5_esw_dl_port_free(dl_port); return err; @@ -109,6 +115,12 @@ void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_eswitch *esw, u16 vpo vport = mlx5_eswitch_get_vport(esw, vport_num); if (IS_ERR(vport)) return; + + if (vport->dl_port->devlink_rate) { + mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); + devlink_rate_leaf_destroy(vport->dl_port); + } + devlink_port_unregister(vport->dl_port); mlx5_esw_dl_port_free(vport->dl_port); vport->dl_port = NULL; @@ -148,8 +160,16 @@ int mlx5_esw_devlink_sf_port_register(struct mlx5_eswitch *esw, struct devlink_p if (err) return err; + err = devlink_rate_leaf_create(dl_port, vport); + if (err) + goto rate_err; + vport->dl_port = dl_port; return 0; + +rate_err: + devlink_port_unregister(dl_port); + return err; } void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num) @@ -159,6 +179,12 @@ void mlx5_esw_devlink_sf_port_unregister(struct mlx5_eswitch *esw, u16 vport_num vport = 
mlx5_eswitch_get_vport(esw, vport_num); if (IS_ERR(vport)) return; + + if (vport->dl_port->devlink_rate) { + mlx5_esw_qos_vport_update_group(esw, vport, NULL, NULL); + devlink_rate_leaf_destroy(vport->dl_port); + } + devlink_port_unregister(vport->dl_port); vport->dl_port = NULL; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h index 227964b7d3b9..3401188e0a60 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h @@ -85,11 +85,18 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_port_template, TP_ARGS(port), TP_STRUCT__entry( __field(u16, vport_num) + __field(u16, esw_owner_vhca_id) + __field(u16, flags) ), TP_fast_assign( __entry->vport_num = port->vport_num; + __entry->esw_owner_vhca_id = port->esw_owner_vhca_id; + __entry->flags = port->flags; ), - TP_printk("vport_num=%hu", __entry->vport_num) + TP_printk("vport_num=%hu esw_owner_vhca_id=%hu flags=%hx", + __entry->vport_num, + __entry->esw_owner_vhca_id, + __entry->flags) ); DEFINE_EVENT(mlx5_esw_bridge_port_template, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h new file mode 100644 index 000000000000..458baf0c6415 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/qos_tracepoint.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM mlx5 + +#if !defined(_MLX5_ESW_TP_) || defined(TRACE_HEADER_MULTI_READ) +#define _MLX5_ESW_TP_ + +#include <linux/tracepoint.h> +#include "eswitch.h" + +TRACE_EVENT(mlx5_esw_vport_qos_destroy, + TP_PROTO(const struct mlx5_vport *vport), + TP_ARGS(vport), + TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device)) + __field(unsigned short, vport_id) + __field(unsigned int, tsar_ix) + ), + TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device)); + __entry->vport_id = vport->vport; + __entry->tsar_ix = vport->qos.esw_tsar_ix; + ), + TP_printk("(%s) vport=%hu tsar_ix=%u\n", + __get_str(devname), __entry->vport_id, __entry->tsar_ix + ) +); + +DECLARE_EVENT_CLASS(mlx5_esw_vport_qos_template, + TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), + TP_ARGS(vport, bw_share, max_rate), + TP_STRUCT__entry(__string(devname, dev_name(vport->dev->device)) + __field(unsigned short, vport_id) + __field(unsigned int, tsar_ix) + __field(unsigned int, bw_share) + __field(unsigned int, max_rate) + __field(void *, group) + ), + TP_fast_assign(__assign_str(devname, dev_name(vport->dev->device)); + __entry->vport_id = vport->vport; + __entry->tsar_ix = vport->qos.esw_tsar_ix; + __entry->bw_share = bw_share; + __entry->max_rate = max_rate; + __entry->group = vport->qos.group; + ), + TP_printk("(%s) vport=%hu tsar_ix=%u bw_share=%u, max_rate=%u group=%p\n", + __get_str(devname), __entry->vport_id, __entry->tsar_ix, + __entry->bw_share, __entry->max_rate, __entry->group + ) +); + +DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_create, + TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), + TP_ARGS(vport, bw_share, max_rate) + ); + +DEFINE_EVENT(mlx5_esw_vport_qos_template, mlx5_esw_vport_qos_config, + TP_PROTO(const struct mlx5_vport *vport, u32 bw_share, u32 max_rate), + TP_ARGS(vport, bw_share, max_rate) + 
); + +DECLARE_EVENT_CLASS(mlx5_esw_group_qos_template, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix), + TP_ARGS(dev, group, tsar_ix), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(const void *, group) + __field(unsigned int, tsar_ix) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->group = group; + __entry->tsar_ix = tsar_ix; + ), + TP_printk("(%s) group=%p tsar_ix=%u\n", + __get_str(devname), __entry->group, __entry->tsar_ix + ) +); + +DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_create, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix), + TP_ARGS(dev, group, tsar_ix) + ); + +DEFINE_EVENT(mlx5_esw_group_qos_template, mlx5_esw_group_qos_destroy, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix), + TP_ARGS(dev, group, tsar_ix) + ); + +TRACE_EVENT(mlx5_esw_group_qos_config, + TP_PROTO(const struct mlx5_core_dev *dev, + const struct mlx5_esw_rate_group *group, + unsigned int tsar_ix, u32 bw_share, u32 max_rate), + TP_ARGS(dev, group, tsar_ix, bw_share, max_rate), + TP_STRUCT__entry(__string(devname, dev_name(dev->device)) + __field(const void *, group) + __field(unsigned int, tsar_ix) + __field(unsigned int, bw_share) + __field(unsigned int, max_rate) + ), + TP_fast_assign(__assign_str(devname, dev_name(dev->device)); + __entry->group = group; + __entry->tsar_ix = tsar_ix; + __entry->bw_share = bw_share; + __entry->max_rate = max_rate; + ), + TP_printk("(%s) group=%p tsar_ix=%u bw_share=%u max_rate=%u\n", + __get_str(devname), __entry->group, __entry->tsar_ix, + __entry->bw_share, __entry->max_rate + ) +); +#endif /* _MLX5_ESW_TP_ */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH esw/diag +#undef TRACE_INCLUDE_FILE +#define TRACE_INCLUDE_FILE qos_tracepoint +#include <trace/define_trace.h> diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c index 3da7becc1069..425c91814b34 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/indir_table.c @@ -364,6 +364,7 @@ static int mlx5_create_indir_fwd_group(struct mlx5_eswitch *esw, dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; dest.vport.num = e->vport; dest.vport.vhca_id = MLX5_CAP_GEN(esw->dev, vhca_id); + dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID; e->fwd_rule = mlx5_add_flow_rules(e->ft, spec, &flow_act, &dest, 1); if (IS_ERR(e->fwd_rule)) { mlx5_destroy_flow_group(e->fwd_grp); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c index d9041b16611d..df277a6cddc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/legacy.c @@ -11,6 +11,7 @@ #include "mlx5_core.h" #include "eswitch.h" #include "fs_core.h" +#include "esw/qos.h" enum { LEGACY_VEPA_PRIO = 0, @@ -508,3 +509,22 @@ unlock: mutex_unlock(&esw->state_lock); return err; } + +int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, + u32 max_rate, u32 min_rate) +{ + struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); + int err; + + if (!mlx5_esw_allowed(esw)) + return -EPERM; + if (IS_ERR(evport)) + return PTR_ERR(evport); + + mutex_lock(&esw->state_lock); + err = 
mlx5_esw_qos_set_vport_min_rate(esw, evport, min_rate, NULL); + if (!err) + err = mlx5_esw_qos_set_vport_max_rate(esw, evport, max_rate, NULL); + mutex_unlock(&esw->state_lock); + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c new file mode 100644 index 000000000000..985e305179d1 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.c @@ -0,0 +1,869 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. */ + +#include "eswitch.h" +#include "esw/qos.h" +#include "en/port.h" +#define CREATE_TRACE_POINTS +#include "diag/qos_tracepoint.h" + +/* Minimum supported BW share value by the HW is 1 Mbit/sec */ +#define MLX5_MIN_BW_SHARE 1 + +#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ + min_t(u32, max_t(u32, DIV_ROUND_UP(rate, divider), MLX5_MIN_BW_SHARE), limit) + +struct mlx5_esw_rate_group { + u32 tsar_ix; + u32 max_rate; + u32 min_rate; + u32 bw_share; + struct list_head list; +}; + +static int esw_qos_tsar_config(struct mlx5_core_dev *dev, u32 *sched_ctx, + u32 parent_ix, u32 tsar_ix, + u32 max_rate, u32 bw_share) +{ + u32 bitmask = 0; + + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return -EOPNOTSUPP; + + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; + + return mlx5_modify_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + sched_ctx, + tsar_ix, + bitmask); +} + +static int esw_qos_group_config(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, + u32 max_rate, u32 bw_share, struct netlink_ext_ack *extack) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_core_dev *dev = esw->dev; + int err; + + err = esw_qos_tsar_config(dev, sched_ctx, + esw->qos.root_tsar_ix, group->tsar_ix, + max_rate, bw_share); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch modify group TSAR element failed"); + + trace_mlx5_esw_group_qos_config(dev, group, group->tsar_ix, bw_share, max_rate); + + return err; +} + +static int esw_qos_vport_config(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u32 max_rate, u32 bw_share, + struct netlink_ext_ack *extack) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group = vport->qos.group; + struct mlx5_core_dev *dev = esw->dev; + u32 parent_tsar_ix; + void *vport_elem; + int err; + + if (!vport->qos.enabled) + return -EIO; + + parent_tsar_ix = group ? 
group->tsar_ix : esw->qos.root_tsar_ix; + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, + element_attributes); + MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); + + err = esw_qos_tsar_config(dev, sched_ctx, parent_tsar_ix, vport->qos.esw_tsar_ix, + max_rate, bw_share); + if (err) { + esw_warn(esw->dev, + "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", + vport->vport, err); + NL_SET_ERR_MSG_MOD(extack, "E-Switch modify TSAR vport element failed"); + return err; + } + + trace_mlx5_esw_vport_qos_config(vport, bw_share, max_rate); + + return 0; +} + +static u32 esw_qos_calculate_min_rate_divider(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + bool group_level) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_vport *evport; + u32 max_guarantee = 0; + unsigned long i; + + if (group_level) { + struct mlx5_esw_rate_group *group; + + list_for_each_entry(group, &esw->qos.groups, list) { + if (group->min_rate < max_guarantee) + continue; + max_guarantee = group->min_rate; + } + } else { + mlx5_esw_for_each_vport(esw, i, evport) { + if (!evport->enabled || !evport->qos.enabled || + evport->qos.group != group || evport->qos.min_rate < max_guarantee) + continue; + max_guarantee = evport->qos.min_rate; + } + } + + if (max_guarantee) + return max_t(u32, max_guarantee / fw_max_bw_share, 1); + + /* If vports min rate divider is 0 but their group has bw_share configured, then + * need to set bw_share for vports to minimal value. + */ + if (!group_level && !max_guarantee && group->bw_share) + return 1; + return 0; +} + +static u32 esw_qos_calc_bw_share(u32 min_rate, u32 divider, u32 fw_max) +{ + if (divider) + return MLX5_RATE_TO_BW_SHARE(min_rate, divider, fw_max); + + return 0; +} + +static int esw_qos_normalize_vports_min_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + u32 divider = esw_qos_calculate_min_rate_divider(esw, group, false); + struct mlx5_vport *evport; + unsigned long i; + u32 bw_share; + int err; + + mlx5_esw_for_each_vport(esw, i, evport) { + if (!evport->enabled || !evport->qos.enabled || evport->qos.group != group) + continue; + bw_share = esw_qos_calc_bw_share(evport->qos.min_rate, divider, fw_max_bw_share); + + if (bw_share == evport->qos.bw_share) + continue; + + err = esw_qos_vport_config(esw, evport, evport->qos.max_rate, bw_share, extack); + if (err) + return err; + + evport->qos.bw_share = bw_share; + } + + return 0; +} + +static int esw_qos_normalize_groups_min_rate(struct mlx5_eswitch *esw, u32 divider, + struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_esw_rate_group *group; + u32 bw_share; + int err; + + list_for_each_entry(group, &esw->qos.groups, list) { + bw_share = esw_qos_calc_bw_share(group->min_rate, divider, fw_max_bw_share); + + if (bw_share == group->bw_share) + continue; + + err = esw_qos_group_config(esw, group, group->max_rate, bw_share, extack); + if (err) + return err; + + group->bw_share = bw_share; + + /* All the group's vports need to be set with default bw_share + * to enable them with QOS + */ + err = esw_qos_normalize_vports_min_rate(esw, group, extack); + + if (err) + return err; + } + + return 0; +} + +int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, + struct 
mlx5_vport *evport, + u32 min_rate, + struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share, previous_min_rate; + bool min_rate_supported; + int err; + + lockdep_assert_held(&esw->state_lock); + fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && + fw_max_bw_share >= MLX5_MIN_BW_SHARE; + if (min_rate && !min_rate_supported) + return -EOPNOTSUPP; + if (min_rate == evport->qos.min_rate) + return 0; + + previous_min_rate = evport->qos.min_rate; + evport->qos.min_rate = min_rate; + err = esw_qos_normalize_vports_min_rate(esw, evport->qos.group, extack); + if (err) + evport->qos.min_rate = previous_min_rate; + + return err; +} + +int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, + u32 max_rate, + struct netlink_ext_ack *extack) +{ + u32 act_max_rate = max_rate; + bool max_rate_supported; + int err; + + lockdep_assert_held(&esw->state_lock); + max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); + + if (max_rate && !max_rate_supported) + return -EOPNOTSUPP; + if (max_rate == evport->qos.max_rate) + return 0; + + /* If parent group has rate limit need to set to group + * value when new max rate is 0. + */ + if (evport->qos.group && !max_rate) + act_max_rate = evport->qos.group->max_rate; + + err = esw_qos_vport_config(esw, evport, act_max_rate, evport->qos.bw_share, extack); + + if (!err) + evport->qos.max_rate = max_rate; + + return err; +} + +static int esw_qos_set_group_min_rate(struct mlx5_eswitch *esw, struct mlx5_esw_rate_group *group, + u32 min_rate, struct netlink_ext_ack *extack) +{ + u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); + struct mlx5_core_dev *dev = esw->dev; + u32 previous_min_rate, divider; + int err; + + if (!(MLX5_CAP_QOS(dev, esw_bw_share) && fw_max_bw_share >= MLX5_MIN_BW_SHARE)) + return -EOPNOTSUPP; + + if (min_rate == group->min_rate) + return 0; + + previous_min_rate = group->min_rate; + group->min_rate = min_rate; + divider = esw_qos_calculate_min_rate_divider(esw, group, true); + err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + if (err) { + group->min_rate = previous_min_rate; + NL_SET_ERR_MSG_MOD(extack, "E-Switch group min rate setting failed"); + + /* Attempt restoring previous configuration */ + divider = esw_qos_calculate_min_rate_divider(esw, group, true); + if (esw_qos_normalize_groups_min_rate(esw, divider, extack)) + NL_SET_ERR_MSG_MOD(extack, "E-Switch BW share restore failed"); + } + + return err; +} + +static int esw_qos_set_group_max_rate(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + u32 max_rate, struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport; + unsigned long i; + int err; + + if (group->max_rate == max_rate) + return 0; + + err = esw_qos_group_config(esw, group, max_rate, group->bw_share, extack); + if (err) + return err; + + group->max_rate = max_rate; + + /* Any unlimited vports in the group should be set + * with the value of the group. 
+ */ + mlx5_esw_for_each_vport(esw, i, vport) { + if (!vport->enabled || !vport->qos.enabled || + vport->qos.group != group || vport->qos.max_rate) + continue; + + err = esw_qos_vport_config(esw, vport, max_rate, vport->qos.bw_share, extack); + if (err) + NL_SET_ERR_MSG_MOD(extack, + "E-Switch vport implicit rate limit setting failed"); + } + + return err; +} + +static int esw_qos_vport_create_sched_element(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + u32 max_rate, u32 bw_share) +{ + u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group = vport->qos.group; + struct mlx5_core_dev *dev = esw->dev; + u32 parent_tsar_ix; + void *vport_elem; + int err; + + parent_tsar_ix = group ? group->tsar_ix : esw->qos.root_tsar_ix; + MLX5_SET(scheduling_context, sched_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); + vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, element_attributes); + MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); + MLX5_SET(scheduling_context, sched_ctx, parent_element_id, parent_tsar_ix); + MLX5_SET(scheduling_context, sched_ctx, max_average_bw, max_rate); + MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); + + err = mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + sched_ctx, + &vport->qos.esw_tsar_ix); + if (err) { + esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", + vport->vport, err); + return err; + } + + return 0; +} + +static int esw_qos_update_group_scheduling_element(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *curr_group, + struct mlx5_esw_rate_group *new_group, + struct netlink_ext_ack *extack) +{ + u32 max_rate; + int err; + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + vport->qos.esw_tsar_ix); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR vport element failed"); + return err; + } + + vport->qos.group = new_group; + max_rate = vport->qos.max_rate ? vport->qos.max_rate : new_group->max_rate; + + /* If vport is unlimited, we set the group's value. + * Therefore, if the group is limited it will apply to + * the vport as well and if not, vport will remain unlimited. + */ + err = esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch vport group set failed."); + goto err_sched; + } + + return 0; + +err_sched: + vport->qos.group = curr_group; + max_rate = vport->qos.max_rate ? 
vport->qos.max_rate : curr_group->max_rate; + if (esw_qos_vport_create_sched_element(esw, vport, max_rate, vport->qos.bw_share)) + esw_warn(esw->dev, "E-Switch vport group restore failed (vport=%d)\n", + vport->vport); + + return err; +} + +static int esw_qos_vport_update_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *new_group, *curr_group; + int err; + + if (!vport->enabled) + return -EINVAL; + + curr_group = vport->qos.group; + new_group = group ?: esw->qos.group0; + if (curr_group == new_group) + return 0; + + err = esw_qos_update_group_scheduling_element(esw, vport, curr_group, new_group, extack); + if (err) + return err; + + /* Recalculate bw share weights of old and new groups */ + if (vport->qos.bw_share) { + esw_qos_normalize_vports_min_rate(esw, curr_group, extack); + esw_qos_normalize_vports_min_rate(esw, new_group, extack); + } + + return 0; +} + +static struct mlx5_esw_rate_group * +esw_qos_create_rate_group(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) +{ + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_esw_rate_group *group; + u32 divider; + int err; + + if (!MLX5_CAP_QOS(esw->dev, log_esw_max_sched_depth)) + return ERR_PTR(-EOPNOTSUPP); + + group = kzalloc(sizeof(*group), GFP_KERNEL); + if (!group) + return ERR_PTR(-ENOMEM); + + MLX5_SET(scheduling_context, tsar_ctx, parent_element_id, + esw->qos.root_tsar_ix); + err = mlx5_create_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + tsar_ctx, + &group->tsar_ix); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch create TSAR for group failed"); + goto err_sched_elem; + } + + list_add_tail(&group->list, &esw->qos.groups); + + divider = esw_qos_calculate_min_rate_divider(esw, group, true); + if (divider) { + err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "E-Switch groups normalization failed"); + goto err_min_rate; + } + } + trace_mlx5_esw_group_qos_create(esw->dev, group, group->tsar_ix); + + return group; + +err_min_rate: + list_del(&group->list); + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + group->tsar_ix); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR for group failed"); +err_sched_elem: + kfree(group); + return ERR_PTR(err); +} + +static int esw_qos_destroy_rate_group(struct mlx5_eswitch *esw, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + u32 divider; + int err; + + list_del(&group->list); + + divider = esw_qos_calculate_min_rate_divider(esw, NULL, true); + err = esw_qos_normalize_groups_min_rate(esw, divider, extack); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch groups' normalization failed"); + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + group->tsar_ix); + if (err) + NL_SET_ERR_MSG_MOD(extack, "E-Switch destroy TSAR_ID failed"); + + trace_mlx5_esw_group_qos_destroy(esw->dev, group, group->tsar_ix); + kfree(group); + return err; +} + +static bool esw_qos_element_type_supported(struct mlx5_core_dev *dev, int type) +{ + switch (type) { + case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_TASR; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: + return MLX5_CAP_QOS(dev, 
esw_element_type) & + ELEMENT_TYPE_CAP_MASK_VPORT_TC; + case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: + return MLX5_CAP_QOS(dev, esw_element_type) & + ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; + } + return false; +} + +void mlx5_esw_qos_create(struct mlx5_eswitch *esw) +{ + u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_core_dev *dev = esw->dev; + __be32 *attr; + int err; + + if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) + return; + + if (!esw_qos_element_type_supported(dev, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) + return; + + mutex_lock(&esw->state_lock); + if (esw->qos.enabled) + goto unlock; + + MLX5_SET(scheduling_context, tsar_ctx, element_type, + SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); + + attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); + *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); + + err = mlx5_create_scheduling_element_cmd(dev, + SCHEDULING_HIERARCHY_E_SWITCH, + tsar_ctx, + &esw->qos.root_tsar_ix); + if (err) { + esw_warn(dev, "E-Switch create root TSAR failed (%d)\n", err); + goto unlock; + } + + INIT_LIST_HEAD(&esw->qos.groups); + if (MLX5_CAP_QOS(dev, log_esw_max_sched_depth)) { + esw->qos.group0 = esw_qos_create_rate_group(esw, NULL); + if (IS_ERR(esw->qos.group0)) { + esw_warn(dev, "E-Switch create rate group 0 failed (%ld)\n", + PTR_ERR(esw->qos.group0)); + goto err_group0; + } + } + esw->qos.enabled = true; +unlock: + mutex_unlock(&esw->state_lock); + return; + +err_group0: + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + esw->qos.root_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err); + mutex_unlock(&esw->state_lock); +} + +void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw) +{ + struct devlink *devlink = priv_to_devlink(esw->dev); + int err; + + devlink_rate_nodes_destroy(devlink); + mutex_lock(&esw->state_lock); + if (!esw->qos.enabled) + goto unlock; + + if (esw->qos.group0) + esw_qos_destroy_rate_group(esw, esw->qos.group0, NULL); + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + esw->qos.root_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy root TSAR failed (%d)\n", err); + + esw->qos.enabled = false; +unlock: + mutex_unlock(&esw->state_lock); +} + +int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + u32 max_rate, u32 bw_share) +{ + int err; + + lockdep_assert_held(&esw->state_lock); + if (!esw->qos.enabled) + return 0; + + if (vport->qos.enabled) + return -EEXIST; + + vport->qos.group = esw->qos.group0; + + err = esw_qos_vport_create_sched_element(esw, vport, max_rate, bw_share); + if (!err) { + vport->qos.enabled = true; + trace_mlx5_esw_vport_qos_create(vport, bw_share, max_rate); + } + + return err; +} + +void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport) +{ + int err; + + lockdep_assert_held(&esw->state_lock); + if (!esw->qos.enabled || !vport->qos.enabled) + return; + WARN(vport->qos.group && vport->qos.group != esw->qos.group0, + "Disabling QoS on port before detaching it from group"); + + err = mlx5_destroy_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + vport->qos.esw_tsar_ix); + if (err) + esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", + vport->vport, err); + + vport->qos.enabled = false; + trace_mlx5_esw_vport_qos_destroy(vport); +} + +int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps) +{ + 
u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; + struct mlx5_vport *vport; + u32 bitmask; + + vport = mlx5_eswitch_get_vport(esw, vport_num); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + if (!vport->qos.enabled) + return -EOPNOTSUPP; + + MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); + bitmask = MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; + + return mlx5_modify_scheduling_element_cmd(esw->dev, + SCHEDULING_HIERARCHY_E_SWITCH, + ctx, + vport->qos.esw_tsar_ix, + bitmask); +} + +#define MLX5_LINKSPEED_UNIT 125000 /* 1Mbps in Bps */ + +/* Converts a bytes-per-second value passed in a pointer into megabits per + * second, rewriting the pointed-to value. If the converted rate exceeds the + * link speed or is not a whole number of Mbps, an error is returned. + */ +static int esw_qos_devlink_rate_to_mbps(struct mlx5_core_dev *mdev, const char *name, + u64 *rate, struct netlink_ext_ack *extack) +{ + u32 link_speed_max, remainder; + u64 value; + int err; + + err = mlx5e_port_max_linkspeed(mdev, &link_speed_max); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to get link maximum speed"); + return err; + } + + value = div_u64_rem(*rate, MLX5_LINKSPEED_UNIT, &remainder); + if (remainder) { + pr_err("%s rate value %lluBps not in link speed units of 1Mbps.\n", + name, *rate); + NL_SET_ERR_MSG_MOD(extack, "TX rate value not in link speed units of 1Mbps"); + return -EINVAL; + } + + if (value > link_speed_max) { + pr_err("%s rate value %lluMbps exceed link maximum speed %u.\n", + name, value, link_speed_max); + NL_SET_ERR_MSG_MOD(extack, "TX rate value exceed link maximum speed"); + return -EINVAL; + } + + *rate = value; + return 0; +} + +/* Eswitch devlink rate API */ + +int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_share, struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport = priv; + struct mlx5_eswitch *esw; + int err; + + esw = vport->dev->priv.eswitch; + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_share", &tx_share, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = mlx5_esw_qos_set_vport_min_rate(esw, vport, tx_share, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_max, struct netlink_ext_ack *extack) +{ + struct mlx5_vport *vport = priv; + struct mlx5_eswitch *esw; + int err; + + esw = vport->dev->priv.eswitch; + if (!mlx5_esw_allowed(esw)) + return -EPERM; + + err = esw_qos_devlink_rate_to_mbps(vport->dev, "tx_max", &tx_max, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = mlx5_esw_qos_set_vport_max_rate(esw, vport, tx_max, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, + u64 tx_share, struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev = devlink_priv(rate_node->devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_esw_rate_group *group = priv; + int err; + + err = esw_qos_devlink_rate_to_mbps(dev, "tx_share", &tx_share, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = esw_qos_set_group_min_rate(esw, group, tx_share, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, + u64 tx_max, struct netlink_ext_ack *extack) +{ + struct mlx5_core_dev *dev =
devlink_priv(rate_node->devlink); + struct mlx5_eswitch *esw = dev->priv.eswitch; + struct mlx5_esw_rate_group *group = priv; + int err; + + err = esw_qos_devlink_rate_to_mbps(dev, "tx_max", &tx_max, extack); + if (err) + return err; + + mutex_lock(&esw->state_lock); + err = esw_qos_set_group_max_rate(esw, group, tx_max, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group; + struct mlx5_eswitch *esw; + int err = 0; + + esw = mlx5_devlink_eswitch_get(rate_node->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + mutex_lock(&esw->state_lock); + if (esw->mode != MLX5_ESWITCH_OFFLOADS) { + NL_SET_ERR_MSG_MOD(extack, + "Rate node creation supported only in switchdev mode"); + err = -EOPNOTSUPP; + goto unlock; + } + + group = esw_qos_create_rate_group(esw, extack); + if (IS_ERR(group)) { + err = PTR_ERR(group); + goto unlock; + } + + *priv = group; +unlock: + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group = priv; + struct mlx5_eswitch *esw; + int err; + + esw = mlx5_devlink_eswitch_get(rate_node->devlink); + if (IS_ERR(esw)) + return PTR_ERR(esw); + + mutex_lock(&esw->state_lock); + err = esw_qos_destroy_rate_group(esw, group, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack) +{ + int err; + + mutex_lock(&esw->state_lock); + err = esw_qos_vport_update_group(esw, vport, group, extack); + mutex_unlock(&esw->state_lock); + return err; +} + +int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack) +{ + struct mlx5_esw_rate_group *group; + struct mlx5_vport *vport = priv; + + if (!parent) + return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, + vport, NULL, extack); + + group = parent_priv; + return mlx5_esw_qos_vport_update_group(vport->dev->priv.eswitch, vport, group, extack); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h new file mode 100644 index 000000000000..28451abe2d2f --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/qos.h @@ -0,0 +1,41 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
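For orientation, esw_qos_devlink_rate_to_mbps() above is the only unit conversion on this path: devlink hands tx_share/tx_max to the driver in bytes per second, the firmware scheduling context takes Mbps, and MLX5_LINKSPEED_UNIT (125000 Bps per Mbit) is the conversion factor, with non-multiples of 1 Mbps and rates above the port's maximum link speed rejected. A self-contained userspace sketch of the same arithmetic, with invented example numbers, not driver code:

#include <stdint.h>
#include <stdio.h>

#define LINKSPEED_UNIT 125000ULL	/* 1 Mbit/s expressed in bytes per second */

/* Convert Bps to Mbps, rejecting values that are not a whole number of Mbps
 * or that exceed the port's maximum link speed (both checks mirror the
 * driver's esw_qos_devlink_rate_to_mbps()).
 */
static int bps_to_mbps(uint64_t bps, uint32_t link_max_mbps, uint64_t *mbps)
{
	if (bps % LINKSPEED_UNIT)
		return -1;			/* not a multiple of 1 Mbps */
	*mbps = bps / LINKSPEED_UNIT;
	return *mbps > link_max_mbps ? -1 : 0;
}

int main(void)
{
	uint64_t mbps;

	/* 1,250,000,000 Bps == 10 Gbit/s -> 10000 Mbps on a 25G-capable port */
	if (!bps_to_mbps(1250000000ULL, 25000, &mbps))
		printf("%llu Mbps\n", (unsigned long long)mbps);
	return 0;
}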
*/ + +#ifndef __MLX5_ESW_QOS_H__ +#define __MLX5_ESW_QOS_H__ + +#ifdef CONFIG_MLX5_ESWITCH + +int mlx5_esw_qos_set_vport_min_rate(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, + u32 min_rate, + struct netlink_ext_ack *extack); +int mlx5_esw_qos_set_vport_max_rate(struct mlx5_eswitch *esw, + struct mlx5_vport *evport, + u32 max_rate, + struct netlink_ext_ack *extack); +void mlx5_esw_qos_create(struct mlx5_eswitch *esw); +void mlx5_esw_qos_destroy(struct mlx5_eswitch *esw); +int mlx5_esw_qos_vport_enable(struct mlx5_eswitch *esw, struct mlx5_vport *vport, + u32 max_rate, u32 bw_share); +void mlx5_esw_qos_vport_disable(struct mlx5_eswitch *esw, struct mlx5_vport *vport); + +int mlx5_esw_devlink_rate_leaf_tx_share_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_leaf_tx_max_set(struct devlink_rate *rate_leaf, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_tx_share_set(struct devlink_rate *rate_node, void *priv, + u64 tx_share, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_tx_max_set(struct devlink_rate *rate_node, void *priv, + u64 tx_max, struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_new(struct devlink_rate *rate_node, void **priv, + struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_node_del(struct devlink_rate *rate_node, void *priv, + struct netlink_ext_ack *extack); +int mlx5_esw_devlink_rate_parent_set(struct devlink_rate *devlink_rate, + struct devlink_rate *parent, + void *priv, void *parent_priv, + struct netlink_ext_ack *extack); +#endif + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h deleted file mode 100644 index 2a3f4be10030..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/sample.h +++ /dev/null @@ -1,42 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ -/* Copyright (c) 2021 Mellanox Technologies. 
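The prototypes in the new qos.h above line up with the devlink rate operations (leaf callbacks take a vport as priv, node callbacks a rate group). The wiring itself is not part of these hunks; assuming the standard callback names from the devlink rate API, it would look roughly like the fragment below, which is an illustrative sketch rather than the driver's actual devlink_ops:

/* Hypothetical wiring sketch; the real mlx5 devlink_ops lives elsewhere. */
static const struct devlink_ops sketch_devlink_ops = {
	.rate_leaf_tx_share_set	= mlx5_esw_devlink_rate_leaf_tx_share_set,
	.rate_leaf_tx_max_set	= mlx5_esw_devlink_rate_leaf_tx_max_set,
	.rate_node_tx_share_set	= mlx5_esw_devlink_rate_node_tx_share_set,
	.rate_node_tx_max_set	= mlx5_esw_devlink_rate_node_tx_max_set,
	.rate_node_new		= mlx5_esw_devlink_rate_node_new,
	.rate_node_del		= mlx5_esw_devlink_rate_node_del,
	.rate_leaf_parent_set	= mlx5_esw_devlink_rate_parent_set,
};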
*/ - -#ifndef __MLX5_EN_TC_SAMPLE_H__ -#define __MLX5_EN_TC_SAMPLE_H__ - -#include "en.h" -#include "eswitch.h" - -struct mlx5e_priv; -struct mlx5_flow_attr; -struct mlx5_esw_psample; - -struct mlx5_sample_attr { - u32 group_num; - u32 rate; - u32 trunc_size; - u32 restore_obj_id; - u32 sampler_id; - struct mlx5_flow_table *sample_default_tbl; - struct mlx5_sample_flow *sample_flow; -}; - -void mlx5_esw_sample_skb(struct sk_buff *skb, struct mlx5_mapped_obj *mapped_obj); - -struct mlx5_flow_handle * -mlx5_esw_sample_offload(struct mlx5_esw_psample *sample_priv, - struct mlx5_flow_spec *spec, - struct mlx5_flow_attr *attr); - -void -mlx5_esw_sample_unoffload(struct mlx5_esw_psample *sample_priv, - struct mlx5_flow_handle *rule, - struct mlx5_flow_attr *attr); - -struct mlx5_esw_psample * -mlx5_esw_sample_init(struct mlx5e_priv *priv); - -void -mlx5_esw_sample_cleanup(struct mlx5_esw_psample *esw_psample); - -#endif /* __MLX5_EN_TC_SAMPLE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index 97e6cb6f13c1..ec136b499204 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -38,6 +38,7 @@ #include <linux/mlx5/mpfs.h> #include "esw/acl/lgcy.h" #include "esw/legacy.h" +#include "esw/qos.h" #include "mlx5_core.h" #include "lib/eq.h" #include "eswitch.h" @@ -740,201 +741,6 @@ static void esw_vport_change_handler(struct work_struct *work) mutex_unlock(&esw->state_lock); } -static bool element_type_supported(struct mlx5_eswitch *esw, int type) -{ - const struct mlx5_core_dev *dev = esw->dev; - - switch (type) { - case SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_TASR; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_VPORT; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT_TC: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_VPORT_TC; - case SCHEDULING_CONTEXT_ELEMENT_TYPE_PARA_VPORT_TC: - return MLX5_CAP_QOS(dev, esw_element_type) & - ELEMENT_TYPE_CAP_MASK_PARA_VPORT_TC; - } - return false; -} - -/* Vport QoS management */ -static void esw_create_tsar(struct mlx5_eswitch *esw) -{ - u32 tsar_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_core_dev *dev = esw->dev; - __be32 *attr; - int err; - - if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return; - - if (!element_type_supported(esw, SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR)) - return; - - if (esw->qos.enabled) - return; - - MLX5_SET(scheduling_context, tsar_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_TSAR); - - attr = MLX5_ADDR_OF(scheduling_context, tsar_ctx, element_attributes); - *attr = cpu_to_be32(TSAR_ELEMENT_TSAR_TYPE_DWRR << 16); - - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - tsar_ctx, - &esw->qos.root_tsar_id); - if (err) { - esw_warn(esw->dev, "E-Switch create TSAR failed (%d)\n", err); - return; - } - - esw->qos.enabled = true; -} - -static void esw_destroy_tsar(struct mlx5_eswitch *esw) -{ - int err; - - if (!esw->qos.enabled) - return; - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - esw->qos.root_tsar_id); - if (err) - esw_warn(esw->dev, "E-Switch destroy TSAR failed (%d)\n", err); - - esw->qos.enabled = false; -} - -static int esw_vport_enable_qos(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u32 initial_max_rate, u32 
initial_bw_share) -{ - u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_core_dev *dev = esw->dev; - void *vport_elem; - int err = 0; - - if (!esw->qos.enabled) - return 0; - - if (vport->qos.enabled) - return -EEXIST; - - MLX5_SET(scheduling_context, sched_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, - element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, - esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, - initial_max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, initial_bw_share); - - err = mlx5_create_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - &vport->qos.esw_tsar_ix); - if (err) { - esw_warn(esw->dev, "E-Switch create TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - vport->qos.enabled = true; - return 0; -} - -static void esw_vport_disable_qos(struct mlx5_eswitch *esw, - struct mlx5_vport *vport) -{ - int err; - - if (!vport->qos.enabled) - return; - - err = mlx5_destroy_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - vport->qos.esw_tsar_ix); - if (err) - esw_warn(esw->dev, "E-Switch destroy TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - - vport->qos.enabled = false; -} - -static int esw_vport_qos_config(struct mlx5_eswitch *esw, - struct mlx5_vport *vport, - u32 max_rate, u32 bw_share) -{ - u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0}; - struct mlx5_core_dev *dev = esw->dev; - void *vport_elem; - u32 bitmask = 0; - int err = 0; - - if (!MLX5_CAP_GEN(dev, qos) || !MLX5_CAP_QOS(dev, esw_scheduling)) - return -EOPNOTSUPP; - - if (!vport->qos.enabled) - return -EIO; - - MLX5_SET(scheduling_context, sched_ctx, element_type, - SCHEDULING_CONTEXT_ELEMENT_TYPE_VPORT); - vport_elem = MLX5_ADDR_OF(scheduling_context, sched_ctx, - element_attributes); - MLX5_SET(vport_element, vport_elem, vport_number, vport->vport); - MLX5_SET(scheduling_context, sched_ctx, parent_element_id, - esw->qos.root_tsar_id); - MLX5_SET(scheduling_context, sched_ctx, max_average_bw, - max_rate); - MLX5_SET(scheduling_context, sched_ctx, bw_share, bw_share); - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW; - bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE; - - err = mlx5_modify_scheduling_element_cmd(dev, - SCHEDULING_HIERARCHY_E_SWITCH, - sched_ctx, - vport->qos.esw_tsar_ix, - bitmask); - if (err) { - esw_warn(esw->dev, "E-Switch modify TSAR vport element failed (vport=%d,err=%d)\n", - vport->vport, err); - return err; - } - - return 0; -} - -int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, - u32 rate_mbps) -{ - u32 ctx[MLX5_ST_SZ_DW(scheduling_context)] = {}; - struct mlx5_vport *vport; - - vport = mlx5_eswitch_get_vport(esw, vport_num); - if (IS_ERR(vport)) - return PTR_ERR(vport); - - if (!vport->qos.enabled) - return -EOPNOTSUPP; - - MLX5_SET(scheduling_context, ctx, max_average_bw, rate_mbps); - - return mlx5_modify_scheduling_element_cmd(esw->dev, - SCHEDULING_HIERARCHY_E_SWITCH, - ctx, - vport->qos.esw_tsar_ix, - MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW); -} - static void node_guid_gen_from_mac(u64 *node_guid, const u8 *mac) { ((u8 *)node_guid)[7] = mac[0]; @@ -976,7 +782,7 @@ static int esw_vport_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) return err; /* 
Attach vport to the eswitch rate limiter */ - esw_vport_enable_qos(esw, vport, vport->qos.max_rate, vport->qos.bw_share); + mlx5_esw_qos_vport_enable(esw, vport, vport->qos.max_rate, vport->qos.bw_share); if (mlx5_esw_is_manager_vport(esw, vport_num)) return 0; @@ -1013,7 +819,7 @@ static void esw_vport_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport vport_num, 1, MLX5_VPORT_ADMIN_STATE_DOWN); - esw_vport_disable_qos(esw, vport); + mlx5_esw_qos_vport_disable(esw, vport); esw_vport_cleanup_acl(esw, vport); } @@ -1454,12 +1260,10 @@ int mlx5_eswitch_enable_locked(struct mlx5_eswitch *esw, int mode, int num_vfs) mlx5_eswitch_update_num_of_vfs(esw, num_vfs); - esw_create_tsar(esw); + mlx5_esw_qos_create(esw); esw->mode = mode; - mlx5_lag_update(esw->dev); - if (mode == MLX5_ESWITCH_LEGACY) { err = esw_legacy_enable(esw); } else { @@ -1486,7 +1290,7 @@ abort: if (mode == MLX5_ESWITCH_OFFLOADS) mlx5_rescan_drivers(esw->dev); - esw_destroy_tsar(esw); + mlx5_esw_qos_destroy(esw); mlx5_esw_acls_ns_cleanup(esw); return err; } @@ -1494,7 +1298,7 @@ abort: /** * mlx5_eswitch_enable - Enable eswitch * @esw: Pointer to eswitch - * @num_vfs: Enable eswitch swich for given number of VFs. + * @num_vfs: Enable eswitch switch for given number of VFs. * Caller must pass num_vfs > 0 when enabling eswitch for * vf vports. * mlx5_eswitch_enable() returns 0 on success or error code on failure. @@ -1506,6 +1310,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) if (!mlx5_esw_allowed(esw)) return 0; + mlx5_lag_disable_change(esw->dev); down_write(&esw->mode_lock); if (esw->mode == MLX5_ESWITCH_NONE) { ret = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_LEGACY, num_vfs); @@ -1519,6 +1324,7 @@ int mlx5_eswitch_enable(struct mlx5_eswitch *esw, int num_vfs) esw->esw_funcs.num_vfs = num_vfs; } up_write(&esw->mode_lock); + mlx5_lag_enable_change(esw->dev); return ret; } @@ -1550,12 +1356,10 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw, bool clear_vf) old_mode = esw->mode; esw->mode = MLX5_ESWITCH_NONE; - mlx5_lag_update(esw->dev); - if (old_mode == MLX5_ESWITCH_OFFLOADS) mlx5_rescan_drivers(esw->dev); - esw_destroy_tsar(esw); + mlx5_esw_qos_destroy(esw); mlx5_esw_acls_ns_cleanup(esw); if (clear_vf) @@ -1567,10 +1371,12 @@ void mlx5_eswitch_disable(struct mlx5_eswitch *esw, bool clear_vf) if (!mlx5_esw_allowed(esw)) return; + mlx5_lag_disable_change(esw->dev); down_write(&esw->mode_lock); mlx5_eswitch_disable_locked(esw, clear_vf); esw->esw_funcs.num_vfs = 0; up_write(&esw->mode_lock); + mlx5_lag_enable_change(esw->dev); } static int mlx5_query_hca_cap_host_pf(struct mlx5_core_dev *dev, void *out) @@ -1759,7 +1565,9 @@ int mlx5_eswitch_init(struct mlx5_core_dev *dev) ida_init(&esw->offloads.vport_metadata_ida); xa_init_flags(&esw->offloads.vhca_map, XA_FLAGS_ALLOC); mutex_init(&esw->state_lock); + lockdep_register_key(&esw->mode_lock_key); init_rwsem(&esw->mode_lock); + lockdep_set_class(&esw->mode_lock, &esw->mode_lock_key); esw->enabled_vports = 0; esw->mode = MLX5_ESWITCH_NONE; @@ -1793,6 +1601,7 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) esw->dev->priv.eswitch = NULL; destroy_workqueue(esw->work_queue); + lockdep_unregister_key(&esw->mode_lock_key); mutex_destroy(&esw->state_lock); WARN_ON(!xa_empty(&esw->offloads.vhca_map)); xa_destroy(&esw->offloads.vhca_map); @@ -1889,8 +1698,7 @@ is_port_function_supported(struct mlx5_eswitch *esw, u16 vport_num) mlx5_esw_is_sf_vport(esw, vport_num); } -int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, - 
struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack) { @@ -1899,7 +1707,7 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, int err = -EOPNOTSUPP; u16 vport_num; - esw = mlx5_devlink_eswitch_get(devlink); + esw = mlx5_devlink_eswitch_get(port->devlink); if (IS_ERR(esw)) return PTR_ERR(esw); @@ -1923,8 +1731,7 @@ int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, return err; } -int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, - struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack) { @@ -1933,7 +1740,7 @@ int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, int err = -EOPNOTSUPP; u16 vport_num; - esw = mlx5_devlink_eswitch_get(devlink); + esw = mlx5_devlink_eswitch_get(port->devlink); if (IS_ERR(esw)) { NL_SET_ERR_MSG_MOD(extack, "Eswitch doesn't support set hw_addr"); return PTR_ERR(esw); @@ -2049,110 +1856,6 @@ int __mlx5_eswitch_set_vport_vlan(struct mlx5_eswitch *esw, return err; } -static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw) -{ - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - struct mlx5_vport *evport; - u32 max_guarantee = 0; - unsigned long i; - - mlx5_esw_for_each_vport(esw, i, evport) { - if (!evport->enabled || evport->qos.min_rate < max_guarantee) - continue; - max_guarantee = evport->qos.min_rate; - } - - if (max_guarantee) - return max_t(u32, max_guarantee / fw_max_bw_share, 1); - return 0; -} - -static int normalize_vports_min_rate(struct mlx5_eswitch *esw) -{ - u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - u32 divider = calculate_vports_min_rate_divider(esw); - struct mlx5_vport *evport; - u32 vport_max_rate; - u32 vport_min_rate; - unsigned long i; - u32 bw_share; - int err; - - mlx5_esw_for_each_vport(esw, i, evport) { - if (!evport->enabled) - continue; - vport_min_rate = evport->qos.min_rate; - vport_max_rate = evport->qos.max_rate; - bw_share = 0; - - if (divider) - bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate, - divider, - fw_max_bw_share); - - if (bw_share == evport->qos.bw_share) - continue; - - err = esw_vport_qos_config(esw, evport, vport_max_rate, - bw_share); - if (!err) - evport->qos.bw_share = bw_share; - else - return err; - } - - return 0; -} - -int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, - u32 max_rate, u32 min_rate) -{ - struct mlx5_vport *evport = mlx5_eswitch_get_vport(esw, vport); - u32 fw_max_bw_share; - u32 previous_min_rate; - bool min_rate_supported; - bool max_rate_supported; - int err = 0; - - if (!mlx5_esw_allowed(esw)) - return -EPERM; - if (IS_ERR(evport)) - return PTR_ERR(evport); - - fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share); - min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) && - fw_max_bw_share >= MLX5_MIN_BW_SHARE; - max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit); - - if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported)) - return -EOPNOTSUPP; - - mutex_lock(&esw->state_lock); - - if (min_rate == evport->qos.min_rate) - goto set_max_rate; - - previous_min_rate = evport->qos.min_rate; - evport->qos.min_rate = min_rate; - err = normalize_vports_min_rate(esw); - if (err) { - evport->qos.min_rate = previous_min_rate; - goto unlock; - } - -set_max_rate: - if (max_rate == evport->qos.max_rate) - goto unlock; 
- - err = esw_vport_qos_config(esw, evport, max_rate, evport->qos.bw_share); - if (!err) - evport->qos.max_rate = max_rate; - -unlock: - mutex_unlock(&esw->state_lock); - return err; -} - int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw, u16 vport_num, struct ifla_vf_stats *vf_stats) @@ -2366,10 +2069,23 @@ int mlx5_esw_try_lock(struct mlx5_eswitch *esw) */ void mlx5_esw_unlock(struct mlx5_eswitch *esw) { + if (!mlx5_esw_allowed(esw)) + return; up_write(&esw->mode_lock); } /** + * mlx5_esw_lock() - Take write lock on esw mode lock + * @esw: eswitch device. + */ +void mlx5_esw_lock(struct mlx5_eswitch *esw) +{ + if (!mlx5_esw_allowed(esw)) + return; + down_write(&esw->mode_lock); +} + +/** * mlx5_eswitch_get_total_vports - Get total vports of the eswitch * * @dev: Pointer to core device @@ -2384,3 +2100,15 @@ u16 mlx5_eswitch_get_total_vports(const struct mlx5_core_dev *dev) return mlx5_esw_allowed(esw) ? esw->total_vports : 0; } EXPORT_SYMBOL_GPL(mlx5_eswitch_get_total_vports); + +/** + * mlx5_eswitch_get_core_dev - Get the mdev device + * @esw : eswitch device. + * + * Return the mellanox core device which manages the eswitch. + */ +struct mlx5_core_dev *mlx5_eswitch_get_core_dev(struct mlx5_eswitch *esw) +{ + return mlx5_esw_allowed(esw) ? esw->dev : NULL; +} +EXPORT_SYMBOL(mlx5_eswitch_get_core_dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index d562edf5b0bc..2c7444101bb9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -46,7 +46,7 @@ #include "lib/fs_chains.h" #include "sf/sf.h" #include "en/tc_ct.h" -#include "esw/sample.h" +#include "en/tc/sample.h" enum mlx5_mapped_obj_type { MLX5_MAPPED_OBJ_CHAIN, @@ -61,6 +61,7 @@ struct mlx5_mapped_obj { u32 group_id; u32 rate; u32 trunc_size; + u32 tunnel_id; } sample; }; }; @@ -75,17 +76,20 @@ struct mlx5_mapped_obj { #define MLX5_MAX_MC_PER_VPORT(dev) \ (1 << MLX5_CAP_GEN(dev, log_max_current_mc_list)) -#define MLX5_MIN_BW_SHARE 1 - -#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \ - min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit) - #define mlx5_esw_has_fwd_fdb(dev) \ MLX5_CAP_ESW_FLOWTABLE(dev, fdb_multi_path_to_table) #define esw_chains(esw) \ ((esw)->fdb_table.offloads.esw_chains_priv) +enum { + MAPPING_TYPE_CHAIN, + MAPPING_TYPE_TUNNEL, + MAPPING_TYPE_TUNNEL_ENC_OPTS, + MAPPING_TYPE_LABELS, + MAPPING_TYPE_ZONE, +}; + struct vport_ingress { struct mlx5_flow_table *acl; struct mlx5_flow_handle *allow_rule; @@ -124,6 +128,8 @@ struct vport_egress { struct { struct mlx5_flow_group *fwd_grp; struct mlx5_flow_handle *fwd_rule; + struct mlx5_flow_handle *bounce_rule; + struct mlx5_flow_group *bounce_grp; } offloads; }; }; @@ -150,8 +156,6 @@ enum mlx5_eswitch_vport_event { MLX5_VPORT_PROMISC_CHANGE = BIT(3), }; -struct mlx5_esw_bridge; - struct mlx5_vport { struct mlx5_core_dev *dev; struct hlist_head uc_list[MLX5_L2_ADDR_HASH_SIZE]; @@ -173,6 +177,7 @@ struct mlx5_vport { u32 bw_share; u32 min_rate; u32 max_rate; + struct mlx5_esw_rate_group *group; } qos; u16 vport; @@ -180,7 +185,6 @@ struct mlx5_vport { enum mlx5_eswitch_vport_event enabled_events; int index; struct devlink_port *dl_port; - struct mlx5_esw_bridge *bridge; }; struct mlx5_esw_indir_table; @@ -302,7 +306,9 @@ struct mlx5_eswitch { struct { bool enabled; - u32 root_tsar_id; + u32 root_tsar_ix; + struct mlx5_esw_rate_group *group0; + struct list_head groups; /* Protected by esw->state_lock */ } qos; 
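The MLX5_MIN_BW_SHARE and MLX5_RATE_TO_BW_SHARE definitions removed from this header are still used by the new esw/qos.c helpers shown earlier (esw_qos_calc_bw_share() and friends): the largest configured min_rate guarantee divided by the max_tsar_bw_share capability gives a divider, and each group's or vport's bw_share is its own min_rate divided by that divider, clamped to the [MLX5_MIN_BW_SHARE, firmware maximum] range (a divider of 0 is special-cased). Below is a rough, self-contained sketch of that arithmetic and of the scheduling tree implied by the new qos fields; the numbers are invented for illustration and this is not driver code:

#include <stdint.h>
#include <stdio.h>

/*
 * Scheduling tree implied by the new fields:
 *
 *   root TSAR (DWRR, esw->qos.root_tsar_ix)
 *     +-- group0 (default group, not exposed as a devlink rate node)
 *     |     +-- vport element (vport->qos.esw_tsar_ix), a devlink rate leaf
 *     +-- user-created rate group (devlink rate node)
 *           +-- vport element moved here via the parent_set callback
 */

#define MIN_BW_SHARE 1U			/* mirrors MLX5_MIN_BW_SHARE */

/* Same shape as MLX5_RATE_TO_BW_SHARE: clamp(rate / divider, MIN, limit). */
static uint32_t rate_to_bw_share(uint32_t rate, uint32_t divider, uint32_t limit)
{
	uint32_t share = rate / divider;

	if (share < MIN_BW_SHARE)
		share = MIN_BW_SHARE;
	return share > limit ? limit : share;
}

int main(void)
{
	uint32_t fw_max = 100;			/* example max_tsar_bw_share */
	uint32_t rates[] = { 1000, 250, 0 };	/* example min_rate guarantees, Mbps */
	uint32_t divider = 1000 / fw_max;	/* largest guarantee / fw_max = 10 */

	for (int i = 0; i < 3; i++)		/* prints 100, 25, 1 */
		printf("min_rate %u -> bw_share %u\n", rates[i],
		       rate_to_bw_share(rates[i], divider, fw_max));
	return 0;
}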
struct mlx5_esw_bridge_offloads *br_offloads; @@ -315,6 +321,7 @@ struct mlx5_eswitch { u32 large_group_num; } params; struct blocking_notifier_head n_head; + struct lock_class_key mode_lock_key; }; void esw_offloads_disable(struct mlx5_eswitch *esw); @@ -327,8 +334,7 @@ int mlx5_esw_offloads_vport_metadata_set(struct mlx5_eswitch *esw, bool enable); u32 mlx5_esw_match_metadata_alloc(struct mlx5_eswitch *esw); void mlx5_esw_match_metadata_free(struct mlx5_eswitch *esw, u32 metadata); -int mlx5_esw_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, - u32 rate_mbps); +int mlx5_esw_qos_modify_vport_rate(struct mlx5_eswitch *esw, u16 vport_num, u32 rate_mbps); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); @@ -351,6 +357,10 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw, u16 vport_num, bool setting); int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, u16 vport, u32 max_rate, u32 min_rate); +int mlx5_esw_qos_vport_update_group(struct mlx5_eswitch *esw, + struct mlx5_vport *vport, + struct mlx5_esw_rate_group *group, + struct netlink_ext_ack *extack); int mlx5_eswitch_set_vepa(struct mlx5_eswitch *esw, u8 setting); int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting); int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw, @@ -461,7 +471,6 @@ struct mlx5_esw_flow_attr { } dests[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5_rx_tun_attr *rx_tun_attr; struct mlx5_pkt_reformat *decap_pkt_reformat; - struct mlx5_sample_attr *sample; }; int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, @@ -475,12 +484,10 @@ int mlx5_devlink_eswitch_encap_mode_set(struct devlink *devlink, struct netlink_ext_ack *extack); int mlx5_devlink_eswitch_encap_mode_get(struct devlink *devlink, enum devlink_eswitch_encap_mode *encap); -int mlx5_devlink_port_function_hw_addr_get(struct devlink *devlink, - struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_get(struct devlink_port *port, u8 *hw_addr, int *hw_addr_len, struct netlink_ext_ack *extack); -int mlx5_devlink_port_function_hw_addr_set(struct devlink *devlink, - struct devlink_port *port, +int mlx5_devlink_port_function_hw_addr_set(struct devlink_port *port, const u8 *hw_addr, int hw_addr_len, struct netlink_ext_ack *extack); @@ -699,11 +706,18 @@ void mlx5_esw_get(struct mlx5_core_dev *dev); void mlx5_esw_put(struct mlx5_core_dev *dev); int mlx5_esw_try_lock(struct mlx5_eswitch *esw); void mlx5_esw_unlock(struct mlx5_eswitch *esw); +void mlx5_esw_lock(struct mlx5_eswitch *esw); void esw_vport_change_handle_locked(struct mlx5_vport *vport); bool mlx5_esw_offloads_controller_valid(const struct mlx5_eswitch *esw, u32 controller); +int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw); +void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw); +int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw); + #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -719,6 +733,9 @@ static inline const u32 *mlx5_esw_query_functions(struct mlx5_core_dev *dev) return ERR_PTR(-EOPNOTSUPP); } +static inline void mlx5_esw_unlock(struct mlx5_eswitch *esw) { return; } +static inline void mlx5_esw_lock(struct mlx5_eswitch *esw) { return; } + static inline struct mlx5_flow_handle * esw_add_restore_rule(struct mlx5_eswitch *esw, u32 tag) { @@ -731,6 +748,23 @@ mlx5_esw_vport_to_devlink_port_index(const struct 
mlx5_core_dev *dev, { return vport_num; } + +static inline int +mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) +{ + return 0; +} + +static inline void +mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) {} + +static inline int +mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +{ + return 0; +} #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 3bb71a186004..0d461e38add3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -187,12 +187,12 @@ esw_cleanup_decap_indir(struct mlx5_eswitch *esw, static int esw_setup_sampler_dest(struct mlx5_flow_destination *dest, struct mlx5_flow_act *flow_act, - struct mlx5_esw_flow_attr *esw_attr, + struct mlx5_flow_attr *attr, int i) { flow_act->flags |= FLOW_ACT_IGNORE_FLOW_LEVEL; dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_SAMPLER; - dest[i].sampler_id = esw_attr->sample->sampler_id; + dest[i].sampler_id = attr->sample_attr->sampler_id; return 0; } @@ -435,7 +435,7 @@ esw_setup_dests(struct mlx5_flow_destination *dest, attr->flags |= MLX5_ESW_ATTR_FLAG_SRC_REWRITE; if (attr->flags & MLX5_ESW_ATTR_FLAG_SAMPLE) { - esw_setup_sampler_dest(dest, flow_act, esw_attr, *i); + esw_setup_sampler_dest(dest, flow_act, attr, *i); (*i)++; } else if (attr->dest_ft) { esw_setup_ft_dest(dest, flow_act, esw, attr, spec, *i); @@ -540,10 +540,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_hdr = attr->modify_hdr; - /* esw_attr->sample is allocated only when there is a sample action */ - if (esw_attr->sample && esw_attr->sample->sample_default_tbl) { - fdb = esw_attr->sample->sample_default_tbl; - } else if (split) { + if (split) { fwd_attr.chain = attr->chain; fwd_attr.prio = attr->prio; fwd_attr.vport = esw_attr->in_rep->vport; @@ -927,6 +924,7 @@ out: struct mlx5_flow_handle * mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, + struct mlx5_eswitch *from_esw, struct mlx5_eswitch_rep *rep, u32 sqn) { @@ -945,10 +943,10 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, misc_parameters); MLX5_SET(fte_match_set_misc, misc, source_sqn, sqn); /* source vport is the esw manager */ - MLX5_SET(fte_match_set_misc, misc, source_port, rep->esw->manager_vport); + MLX5_SET(fte_match_set_misc, misc, source_port, from_esw->manager_vport); if (MLX5_CAP_ESW(on_esw->dev, merged_eswitch)) MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, - MLX5_CAP_GEN(rep->esw->dev, vhca_id)); + MLX5_CAP_GEN(from_esw->dev, vhca_id)); misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_sqn); @@ -964,6 +962,9 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + if (rep->vport == MLX5_VPORT_UPLINK) + spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; + flow_rule = mlx5_add_flow_rules(on_esw->fdb_table.offloads.slow_fdb, spec, &flow_act, &dest, 1); if (IS_ERR(flow_rule)) @@ -1614,7 +1615,18 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch 
*esw) goto ns_err; } - table_size = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + + /* To be strictly correct: + * MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) + * should be: + * esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + + * peer_esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ + * but as the peer device might not be in switchdev mode it's not + * possible. We use the fact that by default FW sets max vfs and max sfs + * to the same value on both devices. If it needs to be changed in the future note + * the peer miss group should also be created based on the number of + * total vports of the peer (currently is also uses esw->total_vports). + */ + table_size = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ) + MLX5_ESW_MISS_FLOWS + esw->total_vports + esw->esw_funcs.num_vfs; /* create the slow path fdb with encap set, so further table instances @@ -1671,7 +1683,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) source_eswitch_owner_vhca_id_valid, 1); } - ix = esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ; + /* See comment above table_size calculation */ + ix = MLX5_MAX_PORTS * (esw->total_vports * MAX_SQ_NVPORTS + MAX_PF_SQ); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, ix - 1); @@ -2311,14 +2324,293 @@ void esw_offloads_unload_rep(struct mlx5_eswitch *esw, u16 vport_num) mlx5_esw_offloads_devlink_port_unregister(esw, vport_num); } +static int esw_set_uplink_slave_ingress_root(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + struct mlx5_eswitch *esw; + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_namespace *ns; + struct mlx5_vport *vport; + int err; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, FS_FT_ESW_INGRESS_ACL); + MLX5_SET(set_flow_table_root_in, in, other_vport, 1); + MLX5_SET(set_flow_table_root_in, in, vport_number, MLX5_VPORT_UPLINK); + + if (master) { + esw = master->priv.eswitch; + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + MLX5_SET(set_flow_table_root_in, in, table_of_other_vport, 1); + MLX5_SET(set_flow_table_root_in, in, table_vport_number, + MLX5_VPORT_UPLINK); + + ns = mlx5_get_flow_vport_acl_namespace(master, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport->index); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id, + MLX5_CAP_GEN(master, vhca_id)); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } else { + esw = slave->priv.eswitch; + vport = mlx5_eswitch_get_vport(esw, MLX5_VPORT_UPLINK); + ns = mlx5_get_flow_vport_acl_namespace(slave, + MLX5_FLOW_NAMESPACE_ESW_INGRESS, + vport->index); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + MLX5_SET(set_flow_table_root_in, in, table_id, root->root_ft->id); + } + + err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); + mutex_unlock(&root->chain_lock); + + return err; +} + +static int esw_set_slave_root_fdb(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + struct mlx5_flow_root_namespace *root; + struct 
mlx5_flow_namespace *ns; + int err; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, + FS_FT_FDB); + + if (master) { + ns = mlx5_get_flow_namespace(master, + MLX5_FLOW_NAMESPACE_FDB); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id, + MLX5_CAP_GEN(master, vhca_id)); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } else { + ns = mlx5_get_flow_namespace(slave, + MLX5_FLOW_NAMESPACE_FDB); + root = find_root(&ns->node); + mutex_lock(&root->chain_lock); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } + + err = mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); + mutex_unlock(&root->chain_lock); + + return err; +} + +static int __esw_set_master_egress_rule(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave, + struct mlx5_vport *vport, + struct mlx5_flow_table *acl) +{ + struct mlx5_flow_handle *flow_rule = NULL; + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_spec *spec; + int err = 0; + void *misc; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + MLX5_SET(fte_match_set_misc, misc, source_port, MLX5_VPORT_UPLINK); + MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(slave, vhca_id)); + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + dest.type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest.vport.num = slave->priv.eswitch->manager_vport; + dest.vport.vhca_id = MLX5_CAP_GEN(slave, vhca_id); + dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + + flow_rule = mlx5_add_flow_rules(acl, spec, &flow_act, + &dest, 1); + if (IS_ERR(flow_rule)) + err = PTR_ERR(flow_rule); + else + vport->egress.offloads.bounce_rule = flow_rule; + + kvfree(spec); + return err; +} + +static int esw_set_master_egress_rule(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + struct mlx5_eswitch *esw = master->priv.eswitch; + struct mlx5_flow_table_attr ft_attr = { + .max_fte = 1, .prio = 0, .level = 0, + }; + struct mlx5_flow_namespace *egress_ns; + struct mlx5_flow_table *acl; + struct mlx5_flow_group *g; + struct mlx5_vport *vport; + void *match_criteria; + u32 *flow_group_in; + int err; + + vport = mlx5_eswitch_get_vport(esw, esw->manager_vport); + if (IS_ERR(vport)) + return PTR_ERR(vport); + + egress_ns = mlx5_get_flow_vport_acl_namespace(master, + MLX5_FLOW_NAMESPACE_ESW_EGRESS, + vport->index); + if (!egress_ns) + return -EINVAL; + + if (vport->egress.acl) + return -EINVAL; + + flow_group_in = kvzalloc(inlen, GFP_KERNEL); + if (!flow_group_in) + return -ENOMEM; + + acl = mlx5_create_vport_flow_table(egress_ns, &ft_attr, vport->vport); + if (IS_ERR(acl)) { + err = PTR_ERR(acl); + goto out; + } + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_port); + 
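+ /* This group, together with the single rule __esw_set_master_egress_rule()
+  * installs in it, matches packets by source vport (the uplink) and by the
+  * slave eswitch's owner vhca_id, and forwards them from the master manager
+  * vport's egress ACL back to the slave's manager vport.
+  */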
MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_eswitch_owner_vhca_id); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + MLX5_SET(create_flow_group_in, flow_group_in, + source_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0); + + g = mlx5_create_flow_group(acl, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + goto err_group; + } + + err = __esw_set_master_egress_rule(master, slave, vport, acl); + if (err) + goto err_rule; + + vport->egress.acl = acl; + vport->egress.offloads.bounce_grp = g; + + kvfree(flow_group_in); + + return 0; + +err_rule: + mlx5_destroy_flow_group(g); +err_group: + mlx5_destroy_flow_table(acl); +out: + kvfree(flow_group_in); + return err; +} + +static void esw_unset_master_egress_rule(struct mlx5_core_dev *dev) +{ + struct mlx5_vport *vport; + + vport = mlx5_eswitch_get_vport(dev->priv.eswitch, + dev->priv.eswitch->manager_vport); + + esw_acl_egress_ofld_cleanup(vport); +} + +int mlx5_eswitch_offloads_config_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) +{ + int err; + + err = esw_set_uplink_slave_ingress_root(master_esw->dev, + slave_esw->dev); + if (err) + return -EINVAL; + + err = esw_set_slave_root_fdb(master_esw->dev, + slave_esw->dev); + if (err) + goto err_fdb; + + err = esw_set_master_egress_rule(master_esw->dev, + slave_esw->dev); + if (err) + goto err_acl; + + return err; + +err_acl: + esw_set_slave_root_fdb(NULL, slave_esw->dev); + +err_fdb: + esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); + + return err; +} + +void mlx5_eswitch_offloads_destroy_single_fdb(struct mlx5_eswitch *master_esw, + struct mlx5_eswitch *slave_esw) +{ + esw_unset_master_egress_rule(master_esw->dev); + esw_set_slave_root_fdb(NULL, slave_esw->dev); + esw_set_uplink_slave_ingress_root(NULL, slave_esw->dev); +} + #define ESW_OFFLOADS_DEVCOM_PAIR (0) #define ESW_OFFLOADS_DEVCOM_UNPAIR (1) -static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, - struct mlx5_eswitch *peer_esw) +static void mlx5_esw_offloads_rep_event_unpair(struct mlx5_eswitch *esw) { + const struct mlx5_eswitch_rep_ops *ops; + struct mlx5_eswitch_rep *rep; + unsigned long i; + u8 rep_type; - return esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); + mlx5_esw_for_each_rep(esw, i, rep) { + rep_type = NUM_REP_TYPES; + while (rep_type--) { + ops = esw->offloads.rep_ops[rep_type]; + if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && + ops->event) + ops->event(esw, rep, MLX5_SWITCHDEV_EVENT_UNPAIR, NULL); + } + } } static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) @@ -2326,9 +2618,42 @@ static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) #if IS_ENABLED(CONFIG_MLX5_CLS_ACT) mlx5e_tc_clean_fdb_peer_flows(esw); #endif + mlx5_esw_offloads_rep_event_unpair(esw); esw_del_fdb_peer_miss_rules(esw); } +static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, + struct mlx5_eswitch *peer_esw) +{ + const struct mlx5_eswitch_rep_ops *ops; + struct mlx5_eswitch_rep *rep; + unsigned long i; + u8 rep_type; + int err; + + err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); + if (err) + return err; + + mlx5_esw_for_each_rep(esw, i, rep) { + for (rep_type = 0; rep_type < NUM_REP_TYPES; rep_type++) { + ops = esw->offloads.rep_ops[rep_type]; + if (atomic_read(&rep->rep_data[rep_type].state) == REP_LOADED && + ops->event) { + err = ops->event(esw, rep, 
MLX5_SWITCHDEV_EVENT_PAIR, peer_esw); + if (err) + goto err_out; + } + } + } + + return 0; + +err_out: + mlx5_esw_offloads_unpair(esw); + return err; +} + static int mlx5_esw_offloads_set_ns_peer(struct mlx5_eswitch *esw, struct mlx5_eswitch *peer_esw, bool pair) @@ -2619,6 +2944,31 @@ static void esw_destroy_uplink_offloads_acl_tables(struct mlx5_eswitch *esw) esw_vport_destroy_offloads_acl_tables(esw, vport); } +int mlx5_eswitch_reload_reps(struct mlx5_eswitch *esw) +{ + struct mlx5_eswitch_rep *rep; + unsigned long i; + int ret; + + if (!esw || esw->mode != MLX5_ESWITCH_OFFLOADS) + return 0; + + rep = mlx5_eswitch_get_rep(esw, MLX5_VPORT_UPLINK); + if (atomic_read(&rep->rep_data[REP_ETH].state) != REP_LOADED) + return 0; + + ret = mlx5_esw_offloads_rep_load(esw, MLX5_VPORT_UPLINK); + if (ret) + return ret; + + mlx5_esw_for_each_rep(esw, i, rep) { + if (atomic_read(&rep->rep_data[REP_ETH].state) == REP_LOADED) + mlx5_esw_offloads_rep_load(esw, rep->vport); + } + + return 0; +} + static int esw_offloads_steering_init(struct mlx5_eswitch *esw) { struct mlx5_esw_indir_table *indir; @@ -2788,6 +3138,7 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) struct mapping_ctx *reg_c0_obj_pool; struct mlx5_vport *vport; unsigned long i; + u64 mapping_id; int err; if (MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, reformat) && @@ -2811,9 +3162,13 @@ int esw_offloads_enable(struct mlx5_eswitch *esw) if (err) goto err_vport_metadata; - reg_c0_obj_pool = mapping_create(sizeof(struct mlx5_mapped_obj), - ESW_REG_C0_USER_DATA_METADATA_MASK, - true); + mapping_id = mlx5_query_nic_system_image_guid(esw->dev); + + reg_c0_obj_pool = mapping_create_for_id(mapping_id, MAPPING_TYPE_CHAIN, + sizeof(struct mlx5_mapped_obj), + ESW_REG_C0_USER_DATA_METADATA_MASK, + true); + if (IS_ERR(reg_c0_obj_pool)) { err = PTR_ERR(reg_c0_obj_pool); goto err_pool; @@ -2991,10 +3346,11 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, if (esw_mode_from_devlink(mode, &mlx5_mode)) return -EINVAL; + mlx5_lag_disable_change(esw->dev); err = mlx5_esw_try_lock(esw); if (err < 0) { NL_SET_ERR_MSG_MOD(extack, "Can't change mode, E-Switch is busy"); - return err; + goto enable_lag; } cur_mlx5_mode = err; err = 0; @@ -3018,6 +3374,8 @@ int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, unlock: mlx5_esw_unlock(esw); +enable_lag: + mlx5_lag_enable_change(esw->dev); return err; } @@ -3091,8 +3449,11 @@ int mlx5_devlink_eswitch_inline_mode_set(struct devlink *devlink, u8 mode, switch (MLX5_CAP_ETH(dev, wqe_inline_mode)) { case MLX5_CAP_INLINE_MODE_NOT_REQUIRED: - if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) + if (mode == DEVLINK_ESWITCH_INLINE_MODE_NONE) { + err = 0; goto out; + } + fallthrough; case MLX5_CAP_INLINE_MODE_L2: NL_SET_ERR_MSG_MOD(extack, "Inline mode can't be set"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c index d713ae24d6b6..a1ac3a654962 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/events.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -27,7 +27,7 @@ static int pcie_core(struct notifier_block *, unsigned long, void *); static int forward_event(struct notifier_block *, unsigned long, void *); static struct mlx5_nb events_nbs_ref[] = { - /* Events to be proccessed by mlx5_core */ + /* Events to be processed by mlx5_core */ {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY }, {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT }, {.nb.notifier_call = 
port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT }, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index d5da4ab65766..306279b7f9e7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -453,7 +453,7 @@ static int mlx5_fpga_conn_create_cq(struct mlx5_fpga_conn *conn, int cq_size) cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(cq_size)); - MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, fdev->conn_res.uar->index); MLX5_SET(cqc, cqc, log_page_size, conn->cq.wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c index 0bba92cf5dc0..8ec148010d62 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/ipsec.c @@ -1516,7 +1516,7 @@ static int mlx5_fpga_esp_modify_xfrm(struct mlx5_accel_esp_xfrm *xfrm, mutex_lock(&fpga_xfrm->lock); if (!fpga_xfrm->sa_ctx) - /* Unbounded xfrm, chane only sw attrs */ + /* Unbounded xfrm, change only sw attrs */ goto change_sw_xfrm_attrs; /* copy original hw sa */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 896a6c3dbdb7..7db8df64a60e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -152,17 +152,56 @@ static int mlx5_cmd_stub_destroy_ns(struct mlx5_flow_root_namespace *ns) return 0; } +static int mlx5_cmd_set_slave_root_fdb(struct mlx5_core_dev *master, + struct mlx5_core_dev *slave, + bool ft_id_valid, + u32 ft_id) +{ + u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; + u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; + struct mlx5_flow_root_namespace *root; + struct mlx5_flow_namespace *ns; + + MLX5_SET(set_flow_table_root_in, in, opcode, + MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); + MLX5_SET(set_flow_table_root_in, in, table_type, + FS_FT_FDB); + if (ft_id_valid) { + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(set_flow_table_root_in, in, + table_eswitch_owner_vhca_id, + MLX5_CAP_GEN(master, vhca_id)); + MLX5_SET(set_flow_table_root_in, in, table_id, + ft_id); + } else { + ns = mlx5_get_flow_namespace(slave, + MLX5_FLOW_NAMESPACE_FDB); + root = find_root(&ns->node); + MLX5_SET(set_flow_table_root_in, in, table_id, + root->root_ft->id); + } + + return mlx5_cmd_exec(slave, in, sizeof(in), out, sizeof(out)); +} + static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, u32 underlay_qpn, bool disconnect) { u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; struct mlx5_core_dev *dev = ns->dev; + int err; if ((MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_IB) && underlay_qpn == 0) return 0; + if (ft->type == FS_FT_FDB && + mlx5_lag_is_shared_fdb(dev) && + !mlx5_lag_is_master(dev)) + return 0; + MLX5_SET(set_flow_table_root_in, in, opcode, MLX5_CMD_OP_SET_FLOW_TABLE_ROOT); MLX5_SET(set_flow_table_root_in, in, table_type, ft->type); @@ -177,7 +216,24 @@ static int mlx5_cmd_update_root_ft(struct mlx5_flow_root_namespace *ns, MLX5_SET(set_flow_table_root_in, in, other_vport, !!(ft->flags & MLX5_FLOW_TABLE_OTHER_VPORT)); - return mlx5_cmd_exec_in(dev, set_flow_table_root, in); + err = mlx5_cmd_exec_in(dev, 
set_flow_table_root, in); + if (!err && + ft->type == FS_FT_FDB && + mlx5_lag_is_shared_fdb(dev) && + mlx5_lag_is_master(dev)) { + err = mlx5_cmd_set_slave_root_fdb(dev, + mlx5_lag_get_peer_mdev(dev), + !disconnect, (!disconnect) ? + ft->id : 0); + if (err && !disconnect) { + MLX5_SET(set_flow_table_root_in, in, op_mod, 0); + MLX5_SET(set_flow_table_root_in, in, table_id, + ns->root_ft->id); + mlx5_cmd_exec_in(dev, set_flow_table_root, in); + } + } + + return err; } static int mlx5_cmd_create_flow_table(struct mlx5_flow_root_namespace *ns, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index c0697e1b7118..9fe8e3c204d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -413,7 +413,7 @@ static bool check_valid_spec(const struct mlx5_flow_spec *spec) return true; } -static struct mlx5_flow_root_namespace *find_root(struct fs_node *node) +struct mlx5_flow_root_namespace *find_root(struct fs_node *node) { struct fs_node *root; struct mlx5_flow_namespace *ns; @@ -2343,7 +2343,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio, #define FLOW_TABLE_BIT_SZ 1 #define GET_FLOW_TABLE_CAP(dev, offset) \ - ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \ + ((be32_to_cpu(*((__be32 *)(dev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur) + \ offset / 32)) >> \ (32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ) static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps) @@ -2493,7 +2493,7 @@ static void set_prio_attrs_in_prio(struct fs_prio *prio, int acc_level) acc_level_ns = set_prio_attrs_in_ns(ns, acc_level); /* If this a prio with chains, and we can jump from one chain - * (namepsace) to another, so we accumulate the levels + * (namespace) to another, so we accumulate the levels */ if (prio->node.type == FS_TYPE_PRIO_CHAINS) acc_level = acc_level_ns; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index 7317cdeab661..98240badc342 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -294,6 +294,8 @@ void mlx5_fs_egress_acls_cleanup(struct mlx5_core_dev *dev); int mlx5_fs_ingress_acls_init(struct mlx5_core_dev *dev, int total_vports); void mlx5_fs_ingress_acls_cleanup(struct mlx5_core_dev *dev); +struct mlx5_flow_root_namespace *find_root(struct fs_node *node); + #define fs_get_obj(v, _node) {v = container_of((_node), typeof(*v), node); } #define fs_list_for_each_entry(pos, root) \ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 9abeb80ffa31..037e18dd4be0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -170,7 +170,7 @@ static bool reset_fw_if_needed(struct mlx5_core_dev *dev) /* The reset only needs to be issued by one PF. The health buffer is * shared between all functions, and will be cleared during a reset. - * Check again to avoid a redundant 2nd reset. If the fatal erros was + * Check again to avoid a redundant 2nd reset. If the fatal errors was * PCI related a reset won't help. 
*/ fatal_error = mlx5_health_check_fatal_sensors(dev); @@ -213,10 +213,6 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mutex_lock(&dev->intf_state_mutex); if (!err_detected && dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) goto unlock;/* a previous error is still being handled */ - if (dev->state == MLX5_DEVICE_STATE_UNINITIALIZED) { - dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - goto unlock; - } enter_error_state(dev, force); unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c index 0e487ec57d5c..0c8594c7df21 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ethtool.c @@ -99,7 +99,9 @@ static void mlx5i_get_channels(struct net_device *dev, } static int mlx5i_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); @@ -107,7 +109,9 @@ static int mlx5i_set_coalesce(struct net_device *netdev, } static int mlx5i_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct mlx5e_priv *priv = mlx5i_epriv(netdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 7d7ed025db0d..67571e5040d6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -50,7 +50,7 @@ static const struct net_device_ops mlx5i_netdev_ops = { .ndo_init = mlx5i_dev_init, .ndo_uninit = mlx5i_dev_cleanup, .ndo_change_mtu = mlx5i_change_mtu, - .ndo_do_ioctl = mlx5i_ioctl, + .ndo_eth_ioctl = mlx5i_ioctl, }; /* IPoIB mlx5 netdev profile */ @@ -314,8 +314,7 @@ static void mlx5i_cleanup_tx(struct mlx5e_priv *priv) static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) { - struct ttc_params ttc_params = {}; - int tt, err; + int err; priv->fs.ns = mlx5_get_flow_namespace(priv->mdev, MLX5_FLOW_NAMESPACE_KERNEL); @@ -330,33 +329,15 @@ static int mlx5i_create_flow_steering(struct mlx5e_priv *priv) priv->netdev->hw_features &= ~NETIF_F_NTUPLE; } - mlx5e_set_ttc_basic_params(priv, &ttc_params); - mlx5e_set_inner_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->inner_indir_tir[tt].tirn; - - err = mlx5e_create_inner_ttc_table(priv, &ttc_params, &priv->fs.inner_ttc); - if (err) { - netdev_err(priv->netdev, "Failed to create inner ttc table, err=%d\n", - err); - goto err_destroy_arfs_tables; - } - - mlx5e_set_ttc_ft_params(&ttc_params); - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) - ttc_params.indir_tirn[tt] = priv->indir_tir[tt].tirn; - - err = mlx5e_create_ttc_table(priv, &ttc_params, &priv->fs.ttc); + err = mlx5e_create_ttc_table(priv); if (err) { netdev_err(priv->netdev, "Failed to create ttc table, err=%d\n", err); - goto err_destroy_inner_ttc_table; + goto err_destroy_arfs_tables; } return 0; -err_destroy_inner_ttc_table: - mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); err_destroy_arfs_tables: mlx5e_arfs_destroy_tables(priv); @@ -365,17 +346,20 @@ err_destroy_arfs_tables: static void mlx5i_destroy_flow_steering(struct mlx5e_priv *priv) { - mlx5e_destroy_ttc_table(priv, &priv->fs.ttc); - 
mlx5e_destroy_inner_ttc_table(priv, &priv->fs.inner_ttc); + mlx5e_destroy_ttc_table(priv); mlx5e_arfs_destroy_tables(priv); } static int mlx5i_init_rx(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; - u16 max_nch = priv->max_nch; + struct mlx5e_lro_param lro_param; int err; + priv->rx_res = mlx5e_rx_res_alloc(); + if (!priv->rx_res) + return -ENOMEM; + mlx5e_create_q_counters(priv); err = mlx5e_open_drop_rq(priv, &priv->drop_rq); @@ -384,54 +368,38 @@ static int mlx5i_init_rx(struct mlx5e_priv *priv) goto err_destroy_q_counters; } - err = mlx5e_create_indirect_rqt(priv); + lro_param = mlx5e_get_lro_param(&priv->channels.params); + err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, 0, + priv->max_nch, priv->drop_rq.rqn, &lro_param, + priv->channels.params.num_channels); if (err) goto err_close_drop_rq; - err = mlx5e_create_direct_rqts(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_rqts; - - err = mlx5e_create_indirect_tirs(priv, true); - if (err) - goto err_destroy_direct_rqts; - - err = mlx5e_create_direct_tirs(priv, priv->direct_tir, max_nch); - if (err) - goto err_destroy_indirect_tirs; - err = mlx5i_create_flow_steering(priv); if (err) - goto err_destroy_direct_tirs; + goto err_destroy_rx_res; return 0; -err_destroy_direct_tirs: - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); -err_destroy_indirect_tirs: - mlx5e_destroy_indirect_tirs(priv); -err_destroy_direct_rqts: - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); -err_destroy_indirect_rqts: - mlx5e_destroy_rqt(priv, &priv->indir_rqt); +err_destroy_rx_res: + mlx5e_rx_res_destroy(priv->rx_res); err_close_drop_rq: mlx5e_close_drop_rq(&priv->drop_rq); err_destroy_q_counters: mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; return err; } static void mlx5i_cleanup_rx(struct mlx5e_priv *priv) { - u16 max_nch = priv->max_nch; - mlx5i_destroy_flow_steering(priv); - mlx5e_destroy_direct_tirs(priv, priv->direct_tir, max_nch); - mlx5e_destroy_indirect_tirs(priv); - mlx5e_destroy_direct_rqts(priv, priv->direct_tir, max_nch); - mlx5e_destroy_rqt(priv, &priv->indir_rqt); + mlx5e_rx_res_destroy(priv->rx_res); mlx5e_close_drop_rq(&priv->drop_rq); mlx5e_destroy_q_counters(priv); + mlx5e_rx_res_free(priv->rx_res); + priv->rx_res = NULL; } /* The stats groups order is opposite to the update_stats() order calls */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c index 18ee21b06a00..5308f23702bc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib_vlan.c @@ -149,7 +149,7 @@ static const struct net_device_ops mlx5i_pkey_netdev_ops = { .ndo_get_stats64 = mlx5i_get_stats, .ndo_uninit = mlx5i_pkey_dev_cleanup, .ndo_change_mtu = mlx5i_pkey_change_mtu, - .ndo_do_ioctl = mlx5i_pkey_ioctl, + .ndo_eth_ioctl = mlx5i_pkey_ioctl, }; /* Child NDOs */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 5c043c5cc403..49ca57c6d31d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -32,7 +32,9 @@ #include <linux/netdevice.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/eswitch.h> #include <linux/mlx5/vport.h> +#include "lib/devcom.h" #include "mlx5_core.h" #include "eswitch.h" #include "lag.h" @@ -45,7 +47,7 @@ static DEFINE_SPINLOCK(lag_lock); static int 
mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, - u8 remap_port2) + u8 remap_port2, bool shared_fdb) { u32 in[MLX5_ST_SZ_DW(create_lag_in)] = {}; void *lag_ctx = MLX5_ADDR_OF(create_lag_in, in, ctx); @@ -54,6 +56,7 @@ static int mlx5_cmd_create_lag(struct mlx5_core_dev *dev, u8 remap_port1, MLX5_SET(lagc, lag_ctx, tx_remap_affinity_1, remap_port1); MLX5_SET(lagc, lag_ctx, tx_remap_affinity_2, remap_port2); + MLX5_SET(lagc, lag_ctx, fdb_selection_mode, shared_fdb); return mlx5_cmd_exec_in(dev, create_lag, in); } @@ -224,35 +227,59 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, } static int mlx5_create_lag(struct mlx5_lag *ldev, - struct lag_tracker *tracker) + struct lag_tracker *tracker, + bool shared_fdb) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + u32 in[MLX5_ST_SZ_DW(destroy_lag_in)] = {}; int err; mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[MLX5_LAG_P1], &ldev->v2p_map[MLX5_LAG_P2]); - mlx5_core_info(dev0, "lag map port 1:%d port 2:%d", - ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2]); + mlx5_core_info(dev0, "lag map port 1:%d port 2:%d shared_fdb:%d", + ldev->v2p_map[MLX5_LAG_P1], ldev->v2p_map[MLX5_LAG_P2], + shared_fdb); err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[MLX5_LAG_P1], - ldev->v2p_map[MLX5_LAG_P2]); - if (err) + ldev->v2p_map[MLX5_LAG_P2], shared_fdb); + if (err) { mlx5_core_err(dev0, "Failed to create LAG (%d)\n", err); + return err; + } + + if (shared_fdb) { + err = mlx5_eswitch_offloads_config_single_fdb(dev0->priv.eswitch, + dev1->priv.eswitch); + if (err) + mlx5_core_err(dev0, "Can't enable single FDB mode\n"); + else + mlx5_core_info(dev0, "Operation mode is single FDB\n"); + } + + if (err) { + MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); + if (mlx5_cmd_exec_in(dev0, destroy_lag, in)) + mlx5_core_err(dev0, + "Failed to deactivate RoCE LAG; driver restart required\n"); + } + return err; } int mlx5_activate_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags) + u8 flags, + bool shared_fdb) { bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE); struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; int err; - err = mlx5_create_lag(ldev, tracker); + err = mlx5_create_lag(ldev, tracker, shared_fdb); if (err) { if (roce_lag) { mlx5_core_err(dev0, @@ -266,6 +293,7 @@ int mlx5_activate_lag(struct mlx5_lag *ldev, } ldev->flags |= flags; + ldev->shared_fdb = shared_fdb; return 0; } @@ -277,6 +305,13 @@ static int mlx5_deactivate_lag(struct mlx5_lag *ldev) int err; ldev->flags &= ~MLX5_LAG_MODE_FLAGS; + mlx5_lag_mp_reset(ldev); + + if (ldev->shared_fdb) { + mlx5_eswitch_offloads_destroy_single_fdb(ldev->pf[MLX5_LAG_P1].dev->priv.eswitch, + ldev->pf[MLX5_LAG_P2].dev->priv.eswitch); + ldev->shared_fdb = false; + } MLX5_SET(destroy_lag_in, in, opcode, MLX5_CMD_OP_DESTROY_LAG); err = mlx5_cmd_exec_in(dev0, destroy_lag, in); @@ -333,6 +368,10 @@ static void mlx5_lag_remove_devices(struct mlx5_lag *ldev) if (!ldev->pf[i].dev) continue; + if (ldev->pf[i].dev->priv.flags & + MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV) + continue; + ldev->pf[i].dev->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(ldev->pf[i].dev); } @@ -342,12 +381,15 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + bool shared_fdb = ldev->shared_fdb; bool roce_lag; int err; roce_lag = __mlx5_lag_is_roce(ldev); - if (roce_lag) { + if (shared_fdb) { 
+ mlx5_lag_remove_devices(ldev); + } else if (roce_lag) { if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) { dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); @@ -359,8 +401,34 @@ static void mlx5_disable_lag(struct mlx5_lag *ldev) if (err) return; - if (roce_lag) + if (shared_fdb || roce_lag) mlx5_lag_add_devices(ldev); + + if (shared_fdb) { + if (!(dev0->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) + mlx5_eswitch_reload_reps(dev0->priv.eswitch); + if (!(dev1->priv.flags & MLX5_PRIV_FLAGS_DISABLE_ALL_ADEV)) + mlx5_eswitch_reload_reps(dev1->priv.eswitch); + } +} + +static bool mlx5_shared_fdb_supported(struct mlx5_lag *ldev) +{ + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; + + if (is_mdev_switchdev_mode(dev0) && + is_mdev_switchdev_mode(dev1) && + mlx5_eswitch_vport_match_metadata_enabled(dev0->priv.eswitch) && + mlx5_eswitch_vport_match_metadata_enabled(dev1->priv.eswitch) && + mlx5_devcom_is_paired(dev0->priv.devcom, + MLX5_DEVCOM_ESW_OFFLOADS) && + MLX5_CAP_GEN(dev1, lag_native_fdb_selection) && + MLX5_CAP_ESW(dev1, root_ft_on_other_esw) && + MLX5_CAP_ESW(dev0, esw_shared_ingress_acl)) + return true; + + return false; } static void mlx5_do_bond(struct mlx5_lag *ldev) @@ -371,14 +439,17 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) bool do_bond, roce_lag; int err; - if (!mlx5_lag_is_ready(ldev)) - return; - - tracker = ldev->tracker; + if (!mlx5_lag_is_ready(ldev)) { + do_bond = false; + } else { + tracker = ldev->tracker; - do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev); + do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev); + } if (do_bond && !__mlx5_lag_is_active(ldev)) { + bool shared_fdb = mlx5_shared_fdb_supported(ldev); + roce_lag = !mlx5_sriov_is_enabled(dev0) && !mlx5_sriov_is_enabled(dev1); @@ -388,23 +459,40 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) dev1->priv.eswitch->mode == MLX5_ESWITCH_NONE; #endif - if (roce_lag) + if (shared_fdb || roce_lag) mlx5_lag_remove_devices(ldev); err = mlx5_activate_lag(ldev, &tracker, roce_lag ? 
MLX5_LAG_FLAG_ROCE : - MLX5_LAG_FLAG_SRIOV); + MLX5_LAG_FLAG_SRIOV, + shared_fdb); if (err) { - if (roce_lag) + if (shared_fdb || roce_lag) mlx5_lag_add_devices(ldev); return; - } - - if (roce_lag) { + } else if (roce_lag) { dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; mlx5_rescan_drivers_locked(dev0); mlx5_nic_vport_enable_roce(dev1); + } else if (shared_fdb) { + dev0->priv.flags &= ~MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); + + err = mlx5_eswitch_reload_reps(dev0->priv.eswitch); + if (!err) + err = mlx5_eswitch_reload_reps(dev1->priv.eswitch); + + if (err) { + dev0->priv.flags |= MLX5_PRIV_FLAGS_DISABLE_IB_ADEV; + mlx5_rescan_drivers_locked(dev0); + mlx5_deactivate_lag(ldev); + mlx5_lag_add_devices(ldev); + mlx5_eswitch_reload_reps(dev0->priv.eswitch); + mlx5_eswitch_reload_reps(dev1->priv.eswitch); + mlx5_core_err(dev0, "Failed to enable lag\n"); + return; + } } } else if (do_bond && __mlx5_lag_is_active(ldev)) { mlx5_modify_lag(ldev, &tracker); @@ -418,21 +506,48 @@ static void mlx5_queue_bond_work(struct mlx5_lag *ldev, unsigned long delay) queue_delayed_work(ldev->wq, &ldev->bond_work, delay); } +static void mlx5_lag_lock_eswitches(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1) +{ + if (dev0) + mlx5_esw_lock(dev0->priv.eswitch); + if (dev1) + mlx5_esw_lock(dev1->priv.eswitch); +} + +static void mlx5_lag_unlock_eswitches(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1) +{ + if (dev1) + mlx5_esw_unlock(dev1->priv.eswitch); + if (dev0) + mlx5_esw_unlock(dev0->priv.eswitch); +} + static void mlx5_do_bond_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct mlx5_lag *ldev = container_of(delayed_work, struct mlx5_lag, bond_work); + struct mlx5_core_dev *dev0 = ldev->pf[MLX5_LAG_P1].dev; + struct mlx5_core_dev *dev1 = ldev->pf[MLX5_LAG_P2].dev; int status; status = mlx5_dev_list_trylock(); if (!status) { - /* 1 sec delay. 
*/ mlx5_queue_bond_work(ldev, HZ); return; } + if (ldev->mode_changes_in_progress) { + mlx5_dev_list_unlock(); + mlx5_queue_bond_work(ldev, HZ); + return; + } + + mlx5_lag_lock_eswitches(dev0, dev1); mlx5_do_bond(ldev); + mlx5_lag_unlock_eswitches(dev0, dev1); mlx5_dev_list_unlock(); } @@ -630,7 +745,7 @@ static void mlx5_ldev_remove_mdev(struct mlx5_lag *ldev, } /* Must be called with intf_mutex held */ -static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) +static int __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev = NULL; struct mlx5_core_dev *tmp_dev; @@ -638,7 +753,7 @@ static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) if (!MLX5_CAP_GEN(dev, vport_group_manager) || !MLX5_CAP_GEN(dev, lag_master) || MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS) - return; + return 0; tmp_dev = mlx5_get_next_phys_dev(dev); if (tmp_dev) @@ -648,15 +763,17 @@ static void __mlx5_lag_dev_add_mdev(struct mlx5_core_dev *dev) ldev = mlx5_lag_dev_alloc(dev); if (!ldev) { mlx5_core_err(dev, "Failed to alloc lag dev\n"); - return; + return 0; } } else { + if (ldev->mode_changes_in_progress) + return -EAGAIN; mlx5_ldev_get(ldev); } mlx5_ldev_add_mdev(ldev, dev); - return; + return 0; } void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) @@ -667,7 +784,13 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) if (!ldev) return; +recheck: mlx5_dev_list_lock(); + if (ldev->mode_changes_in_progress) { + mlx5_dev_list_unlock(); + msleep(100); + goto recheck; + } mlx5_ldev_remove_mdev(ldev, dev); mlx5_dev_list_unlock(); mlx5_ldev_put(ldev); @@ -675,8 +798,16 @@ void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev) void mlx5_lag_add_mdev(struct mlx5_core_dev *dev) { + int err; + +recheck: mlx5_dev_list_lock(); - __mlx5_lag_dev_add_mdev(dev); + err = __mlx5_lag_dev_add_mdev(dev); + if (err) { + mlx5_dev_list_unlock(); + msleep(100); + goto recheck; + } mlx5_dev_list_unlock(); } @@ -690,11 +821,11 @@ void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, if (!ldev) return; - if (__mlx5_lag_is_active(ldev)) - mlx5_disable_lag(ldev); - mlx5_ldev_remove_netdev(ldev, netdev); ldev->flags &= ~MLX5_LAG_FLAG_READY; + + if (__mlx5_lag_is_active(ldev)) + mlx5_queue_bond_work(ldev, 0); } /* Must be called with intf_mutex held */ @@ -716,6 +847,7 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, if (i >= MLX5_MAX_PORTS) ldev->flags |= MLX5_LAG_FLAG_READY; + mlx5_queue_bond_work(ldev, 0); } bool mlx5_lag_is_roce(struct mlx5_core_dev *dev) @@ -746,6 +878,21 @@ bool mlx5_lag_is_active(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_lag_is_active); +bool mlx5_lag_is_master(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + spin_lock(&lag_lock); + ldev = mlx5_lag_dev(dev); + res = ldev && __mlx5_lag_is_active(ldev) && + dev == ldev->pf[MLX5_LAG_P1].dev; + spin_unlock(&lag_lock); + + return res; +} +EXPORT_SYMBOL(mlx5_lag_is_master); + bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; @@ -760,19 +907,50 @@ bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev) } EXPORT_SYMBOL(mlx5_lag_is_sriov); -void mlx5_lag_update(struct mlx5_core_dev *dev) +bool mlx5_lag_is_shared_fdb(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + bool res; + + spin_lock(&lag_lock); + ldev = mlx5_lag_dev(dev); + res = ldev && __mlx5_lag_is_sriov(ldev) && ldev->shared_fdb; + spin_unlock(&lag_lock); + + return res; +} +EXPORT_SYMBOL(mlx5_lag_is_shared_fdb); + +void mlx5_lag_disable_change(struct mlx5_core_dev *dev) { + struct mlx5_core_dev *dev0; + struct 
mlx5_core_dev *dev1; struct mlx5_lag *ldev; mlx5_dev_list_lock(); + ldev = mlx5_lag_dev(dev); - if (!ldev) - goto unlock; + dev0 = ldev->pf[MLX5_LAG_P1].dev; + dev1 = ldev->pf[MLX5_LAG_P2].dev; - mlx5_do_bond(ldev); + ldev->mode_changes_in_progress++; + if (__mlx5_lag_is_active(ldev)) { + mlx5_lag_lock_eswitches(dev0, dev1); + mlx5_disable_lag(ldev); + mlx5_lag_unlock_eswitches(dev0, dev1); + } + mlx5_dev_list_unlock(); +} -unlock: +void mlx5_lag_enable_change(struct mlx5_core_dev *dev) +{ + struct mlx5_lag *ldev; + + mlx5_dev_list_lock(); + ldev = mlx5_lag_dev(dev); + ldev->mode_changes_in_progress--; mlx5_dev_list_unlock(); + mlx5_queue_bond_work(ldev, 0); } struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) @@ -827,6 +1005,26 @@ unlock: } EXPORT_SYMBOL(mlx5_lag_get_slave_port); +struct mlx5_core_dev *mlx5_lag_get_peer_mdev(struct mlx5_core_dev *dev) +{ + struct mlx5_core_dev *peer_dev = NULL; + struct mlx5_lag *ldev; + + spin_lock(&lag_lock); + ldev = mlx5_lag_dev(dev); + if (!ldev) + goto unlock; + + peer_dev = ldev->pf[MLX5_LAG_P1].dev == dev ? + ldev->pf[MLX5_LAG_P2].dev : + ldev->pf[MLX5_LAG_P1].dev; + +unlock: + spin_unlock(&lag_lock); + return peer_dev; +} +EXPORT_SYMBOL(mlx5_lag_get_peer_mdev); + int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, u64 *values, int num_counters, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h index 191392c37558..d4bae528954e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h @@ -39,6 +39,8 @@ struct lag_tracker { */ struct mlx5_lag { u8 flags; + int mode_changes_in_progress; + bool shared_fdb; u8 v2p_map[MLX5_MAX_PORTS]; struct kref ref; struct lag_func pf[MLX5_MAX_PORTS]; @@ -71,7 +73,8 @@ void mlx5_modify_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker); int mlx5_activate_lag(struct mlx5_lag *ldev, struct lag_tracker *tracker, - u8 flags); + u8 flags, + bool shared_fdb); int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, struct net_device *ndev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c index c4bf8b679541..f239b352a58a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.c @@ -161,7 +161,7 @@ static void mlx5_lag_fib_route_event(struct mlx5_lag *ldev, struct lag_tracker tracker; tracker = ldev->tracker; - mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH); + mlx5_activate_lag(ldev, &tracker, MLX5_LAG_FLAG_MULTIPATH, false); } mlx5_lag_set_port_affinity(ldev, MLX5_LAG_NORMAL_AFFINITY); @@ -302,6 +302,14 @@ static int mlx5_lag_fib_event(struct notifier_block *nb, return NOTIFY_DONE; } +void mlx5_lag_mp_reset(struct mlx5_lag *ldev) +{ + /* Clear mfi, as it might become stale when a route delete event + * has been missed, see mlx5_lag_fib_route_event(). 
+ */ + ldev->lag_mp.mfi = NULL; +} + int mlx5_lag_mp_init(struct mlx5_lag *ldev) { struct lag_mp *mp = &ldev->lag_mp; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h index 258ac7b2964e..729c839397a8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag_mp.h @@ -21,11 +21,13 @@ struct lag_mp { #ifdef CONFIG_MLX5_ESWITCH +void mlx5_lag_mp_reset(struct mlx5_lag *ldev); int mlx5_lag_mp_init(struct mlx5_lag *ldev); void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev); #else /* CONFIG_MLX5_ESWITCH */ +static inline void mlx5_lag_mp_reset(struct mlx5_lag *ldev) {}; static inline int mlx5_lag_mp_init(struct mlx5_lag *ldev) { return 0; } static inline void mlx5_lag_mp_cleanup(struct mlx5_lag *ldev) {} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index ce696d523493..ffac8a0e7a23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -749,7 +749,7 @@ static int mlx5_pps_event(struct notifier_block *nb, } else { ptp_event.type = PTP_CLOCK_EXTTS; } - /* TODOL clock->ptp can be NULL if ptp_clock_register failes */ + /* TODO: clock->ptp can be NULL if ptp_clock_register fails */ ptp_clock_event(clock->ptp, &ptp_event); break; case PTP_PF_PEROUT: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c new file mode 100644 index 000000000000..749d17c0057d --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.c @@ -0,0 +1,602 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2021 NVIDIA CORPORATION & AFFILIATES. 
+ +#include <linux/ip.h> +#include <linux/ipv6.h> +#include <linux/tcp.h> +#include <linux/mlx5/fs.h> +#include <linux/mlx5/driver.h> +#include "mlx5_core.h" +#include "lib/fs_ttc.h" + +#define MLX5_TTC_NUM_GROUPS 3 +#define MLX5_TTC_GROUP1_SIZE (BIT(3) + MLX5_NUM_TUNNEL_TT) +#define MLX5_TTC_GROUP2_SIZE BIT(1) +#define MLX5_TTC_GROUP3_SIZE BIT(0) +#define MLX5_TTC_TABLE_SIZE (MLX5_TTC_GROUP1_SIZE +\ + MLX5_TTC_GROUP2_SIZE +\ + MLX5_TTC_GROUP3_SIZE) + +#define MLX5_INNER_TTC_NUM_GROUPS 3 +#define MLX5_INNER_TTC_GROUP1_SIZE BIT(3) +#define MLX5_INNER_TTC_GROUP2_SIZE BIT(1) +#define MLX5_INNER_TTC_GROUP3_SIZE BIT(0) +#define MLX5_INNER_TTC_TABLE_SIZE (MLX5_INNER_TTC_GROUP1_SIZE +\ + MLX5_INNER_TTC_GROUP2_SIZE +\ + MLX5_INNER_TTC_GROUP3_SIZE) + +/* L3/L4 traffic type classifier */ +struct mlx5_ttc_table { + int num_groups; + struct mlx5_flow_table *t; + struct mlx5_flow_group **g; + struct mlx5_ttc_rule rules[MLX5_NUM_TT]; + struct mlx5_flow_handle *tunnel_rules[MLX5_NUM_TUNNEL_TT]; +}; + +struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc) +{ + return ttc->t; +} + +static void mlx5_cleanup_ttc_rules(struct mlx5_ttc_table *ttc) +{ + int i; + + for (i = 0; i < MLX5_NUM_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->rules[i].rule)) { + mlx5_del_flow_rules(ttc->rules[i].rule); + ttc->rules[i].rule = NULL; + } + } + + for (i = 0; i < MLX5_NUM_TUNNEL_TT; i++) { + if (!IS_ERR_OR_NULL(ttc->tunnel_rules[i])) { + mlx5_del_flow_rules(ttc->tunnel_rules[i]); + ttc->tunnel_rules[i] = NULL; + } + } +} + +struct mlx5_etype_proto { + u16 etype; + u8 proto; +}; + +static struct mlx5_etype_proto ttc_rules[] = { + [MLX5_TT_IPV4_TCP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_TCP, + }, + [MLX5_TT_IPV6_TCP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_TCP, + }, + [MLX5_TT_IPV4_UDP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_UDP, + }, + [MLX5_TT_IPV6_UDP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_UDP, + }, + [MLX5_TT_IPV4_IPSEC_AH] = { + .etype = ETH_P_IP, + .proto = IPPROTO_AH, + }, + [MLX5_TT_IPV6_IPSEC_AH] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_AH, + }, + [MLX5_TT_IPV4_IPSEC_ESP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_ESP, + }, + [MLX5_TT_IPV6_IPSEC_ESP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_ESP, + }, + [MLX5_TT_IPV4] = { + .etype = ETH_P_IP, + .proto = 0, + }, + [MLX5_TT_IPV6] = { + .etype = ETH_P_IPV6, + .proto = 0, + }, + [MLX5_TT_ANY] = { + .etype = 0, + .proto = 0, + }, +}; + +static struct mlx5_etype_proto ttc_tunnel_rules[] = { + [MLX5_TT_IPV4_GRE] = { + .etype = ETH_P_IP, + .proto = IPPROTO_GRE, + }, + [MLX5_TT_IPV6_GRE] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_GRE, + }, + [MLX5_TT_IPV4_IPIP] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPIP, + }, + [MLX5_TT_IPV6_IPIP] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPIP, + }, + [MLX5_TT_IPV4_IPV6] = { + .etype = ETH_P_IP, + .proto = IPPROTO_IPV6, + }, + [MLX5_TT_IPV6_IPV6] = { + .etype = ETH_P_IPV6, + .proto = IPPROTO_IPV6, + }, + +}; + +u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt) +{ + return ttc_tunnel_rules[tt].proto; +} + +static bool mlx5_tunnel_proto_supported_rx(struct mlx5_core_dev *mdev, + u8 proto_type) +{ + switch (proto_type) { + case IPPROTO_GRE: + return MLX5_CAP_ETH(mdev, tunnel_stateless_gre); + case IPPROTO_IPIP: + case IPPROTO_IPV6: + return (MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip) || + MLX5_CAP_ETH(mdev, tunnel_stateless_ip_over_ip_rx)); + default: + return false; + } +} + +static bool mlx5_tunnel_any_rx_proto_supported(struct mlx5_core_dev *mdev) +{ + int tt; + + for 
(tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + if (mlx5_tunnel_proto_supported_rx(mdev, + ttc_tunnel_rules[tt].proto)) + return true; + } + return false; +} + +bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev) +{ + return (mlx5_tunnel_any_rx_proto_supported(mdev) && + MLX5_CAP_FLOWTABLE_NIC_RX(mdev, + ft_field_support.inner_ip_version)); +} + +static u8 mlx5_etype_to_ipv(u16 ethertype) +{ + if (ethertype == ETH_P_IP) + return 4; + + if (ethertype == ETH_P_IPV6) + return 6; + + return 0; +} + +static struct mlx5_flow_handle * +mlx5_generate_ttc_rule(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, u16 etype, u8 proto) +{ + int match_ipv_outer = + MLX5_CAP_FLOWTABLE_NIC_RX(dev, + ft_field_support.outer_ip_version); + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + u8 ipv; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + if (proto) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_protocol, proto); + } + + ipv = mlx5_etype_to_ipv(etype); + if (match_ipv_outer && ipv) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, ipv); + } else if (etype) { + spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.ethertype); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ethertype, etype); + } + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(dev, "%s: add rule failed\n", __func__); + } + + kvfree(spec); + return err ? 
ERR_PTR(err) : rule; +} + +static int mlx5_generate_ttc_table_rules(struct mlx5_core_dev *dev, + struct ttc_params *params, + struct mlx5_ttc_table *ttc) +{ + struct mlx5_flow_handle **trules; + struct mlx5_ttc_rule *rules; + struct mlx5_flow_table *ft; + int tt; + int err; + + ft = ttc->t; + rules = ttc->rules; + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + struct mlx5_ttc_rule *rule = &rules[tt]; + + rule->rule = mlx5_generate_ttc_rule(dev, ft, ¶ms->dests[tt], + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rule->rule)) { + err = PTR_ERR(rule->rule); + rule->rule = NULL; + goto del_rules; + } + rule->default_dest = params->dests[tt]; + } + + if (!params->inner_ttc || !mlx5_tunnel_inner_ft_supported(dev)) + return 0; + + trules = ttc->tunnel_rules; + for (tt = 0; tt < MLX5_NUM_TUNNEL_TT; tt++) { + if (!mlx5_tunnel_proto_supported_rx(dev, + ttc_tunnel_rules[tt].proto)) + continue; + trules[tt] = mlx5_generate_ttc_rule(dev, ft, + ¶ms->tunnel_dests[tt], + ttc_tunnel_rules[tt].etype, + ttc_tunnel_rules[tt].proto); + if (IS_ERR(trules[tt])) { + err = PTR_ERR(trules[tt]); + trules[tt] = NULL; + goto del_rules; + } + } + + return 0; + +del_rules: + mlx5_cleanup_ttc_rules(ttc); + return err; +} + +static int mlx5_create_ttc_table_groups(struct mlx5_ttc_table *ttc, + bool use_ipv) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ttc->g = kcalloc(MLX5_TTC_NUM_GROUPS, sizeof(*ttc->g), GFP_KERNEL); + if (!ttc->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ttc->g); + ttc->g = NULL; + return -ENOMEM; + } + + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_protocol); + if (use_ipv) + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ip_version); + else + MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.ethertype); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TTC_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* L3 Group */ + MLX5_SET(fte_match_param, mc, outer_headers.ip_protocol, 0); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TTC_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* Any Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_TTC_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ttc->g[ttc->num_groups]); + ttc->g[ttc->num_groups] = NULL; + kvfree(in); + + return err; +} + +static struct mlx5_flow_handle * +mlx5_generate_inner_ttc_rule(struct mlx5_core_dev *dev, + struct mlx5_flow_table *ft, + struct mlx5_flow_destination *dest, + u16 etype, u8 proto) +{ + MLX5_DECLARE_FLOW_ACT(flow_act); + struct mlx5_flow_handle *rule; + struct mlx5_flow_spec *spec; + int err = 0; + u8 ipv; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return ERR_PTR(-ENOMEM); + + ipv = mlx5_etype_to_ipv(etype); + if (etype && ipv) { + spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; + 
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_version, ipv); + } + + if (proto) { + spec->match_criteria_enable = MLX5_MATCH_INNER_HEADERS; + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, inner_headers.ip_protocol); + MLX5_SET(fte_match_param, spec->match_value, inner_headers.ip_protocol, proto); + } + + rule = mlx5_add_flow_rules(ft, spec, &flow_act, dest, 1); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + mlx5_core_err(dev, "%s: add inner TTC rule failed\n", __func__); + } + + kvfree(spec); + return err ? ERR_PTR(err) : rule; +} + +static int mlx5_generate_inner_ttc_table_rules(struct mlx5_core_dev *dev, + struct ttc_params *params, + struct mlx5_ttc_table *ttc) +{ + struct mlx5_ttc_rule *rules; + struct mlx5_flow_table *ft; + int err; + int tt; + + ft = ttc->t; + rules = ttc->rules; + + for (tt = 0; tt < MLX5_NUM_TT; tt++) { + struct mlx5_ttc_rule *rule = &rules[tt]; + + rule->rule = mlx5_generate_inner_ttc_rule(dev, ft, + ¶ms->dests[tt], + ttc_rules[tt].etype, + ttc_rules[tt].proto); + if (IS_ERR(rule->rule)) { + err = PTR_ERR(rule->rule); + rule->rule = NULL; + goto del_rules; + } + rule->default_dest = params->dests[tt]; + } + + return 0; + +del_rules: + + mlx5_cleanup_ttc_rules(ttc); + return err; +} + +static int mlx5_create_inner_ttc_table_groups(struct mlx5_ttc_table *ttc) +{ + int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); + int ix = 0; + u32 *in; + int err; + u8 *mc; + + ttc->g = kcalloc(MLX5_INNER_TTC_NUM_GROUPS, sizeof(*ttc->g), + GFP_KERNEL); + if (!ttc->g) + return -ENOMEM; + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) { + kfree(ttc->g); + ttc->g = NULL; + return -ENOMEM; + } + + /* L4 Group */ + mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria); + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_protocol); + MLX5_SET_TO_ONES(fte_match_param, mc, inner_headers.ip_version); + MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_INNER_HEADERS); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_INNER_TTC_GROUP1_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* L3 Group */ + MLX5_SET(fte_match_param, mc, inner_headers.ip_protocol, 0); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_INNER_TTC_GROUP2_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + /* Any Group */ + memset(in, 0, inlen); + MLX5_SET_CFG(in, start_flow_index, ix); + ix += MLX5_INNER_TTC_GROUP3_SIZE; + MLX5_SET_CFG(in, end_flow_index, ix - 1); + ttc->g[ttc->num_groups] = mlx5_create_flow_group(ttc->t, in); + if (IS_ERR(ttc->g[ttc->num_groups])) + goto err; + ttc->num_groups++; + + kvfree(in); + return 0; + +err: + err = PTR_ERR(ttc->g[ttc->num_groups]); + ttc->g[ttc->num_groups] = NULL; + kvfree(in); + + return err; +} + +struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params) +{ + struct mlx5_ttc_table *ttc; + int err; + + ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); + if (!ttc) + return ERR_PTR(-ENOMEM); + + WARN_ON_ONCE(params->ft_attr.max_fte); + params->ft_attr.max_fte = MLX5_INNER_TTC_TABLE_SIZE; + ttc->t = mlx5_create_flow_table(params->ns, ¶ms->ft_attr); + if (IS_ERR(ttc->t)) { + err = PTR_ERR(ttc->t); + kvfree(ttc); + return ERR_PTR(err); + 
} + + err = mlx5_create_inner_ttc_table_groups(ttc); + if (err) + goto destroy_ft; + + err = mlx5_generate_inner_ttc_table_rules(dev, params, ttc); + if (err) + goto destroy_ft; + + return ttc; + +destroy_ft: + mlx5_destroy_ttc_table(ttc); + return ERR_PTR(err); +} + +void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc) +{ + int i; + + mlx5_cleanup_ttc_rules(ttc); + for (i = ttc->num_groups - 1; i >= 0; i--) { + if (!IS_ERR_OR_NULL(ttc->g[i])) + mlx5_destroy_flow_group(ttc->g[i]); + ttc->g[i] = NULL; + } + + kfree(ttc->g); + mlx5_destroy_flow_table(ttc->t); + kvfree(ttc); +} + +struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params) +{ + bool match_ipv_outer = + MLX5_CAP_FLOWTABLE_NIC_RX(dev, + ft_field_support.outer_ip_version); + struct mlx5_ttc_table *ttc; + int err; + + ttc = kvzalloc(sizeof(*ttc), GFP_KERNEL); + if (!ttc) + return ERR_PTR(-ENOMEM); + + WARN_ON_ONCE(params->ft_attr.max_fte); + params->ft_attr.max_fte = MLX5_TTC_TABLE_SIZE; + ttc->t = mlx5_create_flow_table(params->ns, ¶ms->ft_attr); + if (IS_ERR(ttc->t)) { + err = PTR_ERR(ttc->t); + kvfree(ttc); + return ERR_PTR(err); + } + + err = mlx5_create_ttc_table_groups(ttc, match_ipv_outer); + if (err) + goto destroy_ft; + + err = mlx5_generate_ttc_table_rules(dev, params, ttc); + if (err) + goto destroy_ft; + + return ttc; + +destroy_ft: + mlx5_destroy_ttc_table(ttc); + return ERR_PTR(err); +} + +int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type, + struct mlx5_flow_destination *new_dest) +{ + return mlx5_modify_rule_destination(ttc->rules[type].rule, new_dest, + NULL); +} + +struct mlx5_flow_destination +mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type) +{ + struct mlx5_flow_destination *dest = &ttc->rules[type].default_dest; + + WARN_ONCE(dest->type != MLX5_FLOW_DESTINATION_TYPE_TIR, + "TTC[%d] default dest is not setup yet", type); + + return *dest; +} + +int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type) +{ + struct mlx5_flow_destination dest = mlx5_ttc_get_default_dest(ttc, type); + + return mlx5_ttc_fwd_dest(ttc, type, &dest); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h new file mode 100644 index 000000000000..ce95be8f8382 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_ttc.h @@ -0,0 +1,70 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2020 Mellanox Technologies. 
*/ + +#ifndef __ML5_FS_TTC_H__ +#define __ML5_FS_TTC_H__ + +#include <linux/mlx5/fs.h> + +enum mlx5_traffic_types { + MLX5_TT_IPV4_TCP, + MLX5_TT_IPV6_TCP, + MLX5_TT_IPV4_UDP, + MLX5_TT_IPV6_UDP, + MLX5_TT_IPV4_IPSEC_AH, + MLX5_TT_IPV6_IPSEC_AH, + MLX5_TT_IPV4_IPSEC_ESP, + MLX5_TT_IPV6_IPSEC_ESP, + MLX5_TT_IPV4, + MLX5_TT_IPV6, + MLX5_TT_ANY, + MLX5_NUM_TT, + MLX5_NUM_INDIR_TIRS = MLX5_TT_ANY, +}; + +enum mlx5_tunnel_types { + MLX5_TT_IPV4_GRE, + MLX5_TT_IPV6_GRE, + MLX5_TT_IPV4_IPIP, + MLX5_TT_IPV6_IPIP, + MLX5_TT_IPV4_IPV6, + MLX5_TT_IPV6_IPV6, + MLX5_NUM_TUNNEL_TT, +}; + +struct mlx5_ttc_rule { + struct mlx5_flow_handle *rule; + struct mlx5_flow_destination default_dest; +}; + +struct mlx5_ttc_table; + +struct ttc_params { + struct mlx5_flow_namespace *ns; + struct mlx5_flow_table_attr ft_attr; + struct mlx5_flow_destination dests[MLX5_NUM_TT]; + bool inner_ttc; + struct mlx5_flow_destination tunnel_dests[MLX5_NUM_TUNNEL_TT]; +}; + +struct mlx5_flow_table *mlx5_get_ttc_flow_table(struct mlx5_ttc_table *ttc); + +struct mlx5_ttc_table *mlx5_create_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params); +void mlx5_destroy_ttc_table(struct mlx5_ttc_table *ttc); + +struct mlx5_ttc_table *mlx5_create_inner_ttc_table(struct mlx5_core_dev *dev, + struct ttc_params *params); + +int mlx5_ttc_fwd_dest(struct mlx5_ttc_table *ttc, enum mlx5_traffic_types type, + struct mlx5_flow_destination *new_dest); +struct mlx5_flow_destination +mlx5_ttc_get_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type); +int mlx5_ttc_fwd_default_dest(struct mlx5_ttc_table *ttc, + enum mlx5_traffic_types type); + +bool mlx5_tunnel_inner_ft_supported(struct mlx5_core_dev *mdev); +u8 mlx5_get_proto_by_tunnel_type(enum mlx5_tunnel_types tt); + +#endif /* __MLX5_FS_TTC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c index 38084400ee8f..e3b0a131c3e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/vxlan.c @@ -40,7 +40,7 @@ struct mlx5_vxlan { struct mlx5_core_dev *mdev; - /* max_num_ports is usuallly 4, 16 buckets is more than enough */ + /* max_num_ports is usually 4, 16 buckets is more than enough */ DECLARE_HASHTABLE(htable, 4); struct mutex sync_lock; /* sync add/del port HW operations */ }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index c84ad87c99bb..79482824c64f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -252,28 +252,16 @@ static int set_dma_caps(struct pci_dev *pdev) { int err; - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (err) { dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n"); - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n"); return err; } } - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); - if (err) { - dev_warn(&pdev->dev, - "Warning: couldn't set 64-bit consistent PCI DMA mask\n"); - err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (err) { - dev_err(&pdev->dev, - "Can't set consistent PCI DMA mask, aborting\n"); - return err; - } - } - dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024); return err; } @@ -389,11 +377,11 @@ static int 
mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, switch (cap_mode) { case HCA_CAP_OPMOD_GET_MAX: - memcpy(dev->caps.hca_max[cap_type], hca_caps, + memcpy(dev->caps.hca[cap_type]->max, hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; case HCA_CAP_OPMOD_GET_CUR: - memcpy(dev->caps.hca_cur[cap_type], hca_caps, + memcpy(dev->caps.hca[cap_type]->cur, hca_caps, MLX5_UN_SZ_BYTES(hca_cap_union)); break; default: @@ -469,7 +457,7 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx) return err; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ODP], + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur, MLX5_ST_SZ_BYTES(odp_cap)); #define ODP_CAP_SET_MAX(dev, field) \ @@ -514,7 +502,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx) set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL], + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur, MLX5_ST_SZ_BYTES(cmd_hca_cap)); mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n", @@ -596,7 +584,7 @@ static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx) return 0; set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability); - memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_ROCE], + memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur, MLX5_ST_SZ_BYTES(roce_cap)); MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1); @@ -748,14 +736,12 @@ static int mlx5_core_set_issi(struct mlx5_core_dev *dev) static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev, const struct pci_device_id *id) { - struct mlx5_priv *priv = &dev->priv; int err = 0; mutex_init(&dev->pci_status_mutex); pci_set_drvdata(dev->pdev, dev); dev->bar_addr = pci_resource_start(pdev, 0); - priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); err = mlx5_pci_enable_device(dev); if (err) { @@ -1179,6 +1165,7 @@ static int mlx5_load(struct mlx5_core_dev *dev) goto err_ec; } + mlx5_lag_add_mdev(dev); err = mlx5_sriov_attach(dev); if (err) { mlx5_core_err(dev, "sriov init failed %d\n", err); @@ -1186,11 +1173,11 @@ static int mlx5_load(struct mlx5_core_dev *dev) } mlx5_sf_dev_table_create(dev); - mlx5_lag_add_mdev(dev); return 0; err_sriov: + mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); err_ec: mlx5_sf_hw_table_destroy(dev); @@ -1222,9 +1209,9 @@ err_irq_table: static void mlx5_unload(struct mlx5_core_dev *dev) { - mlx5_lag_remove_mdev(dev); mlx5_sf_dev_table_destroy(dev); mlx5_sriov_detach(dev); + mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); mlx5_sf_hw_table_destroy(dev); mlx5_vhca_event_stop(dev); @@ -1248,11 +1235,6 @@ int mlx5_init_one(struct mlx5_core_dev *dev) int err = 0; mutex_lock(&dev->intf_state_mutex); - if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) { - mlx5_core_warn(dev, "interface is up, NOP\n"); - goto out; - } - /* remove any previous indication of internal error */ dev->state = MLX5_DEVICE_STATE_UP; err = mlx5_function_setup(dev, true); @@ -1271,7 +1253,7 @@ int mlx5_init_one(struct mlx5_core_dev *dev) set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state); - err = mlx5_devlink_register(priv_to_devlink(dev), dev->device); + err = mlx5_devlink_register(priv_to_devlink(dev)); if (err) goto err_devlink_reg; @@ -1293,7 +1275,6 @@ function_teardown: mlx5_function_teardown(dev, true); err_function: dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; -out: mutex_unlock(&dev->intf_state_mutex); return err; } @@ -1380,6 +1361,60 @@ out: 
mutex_unlock(&dev->intf_state_mutex); } +static const int types[] = { + MLX5_CAP_GENERAL, + MLX5_CAP_GENERAL_2, + MLX5_CAP_ETHERNET_OFFLOADS, + MLX5_CAP_IPOIB_ENHANCED_OFFLOADS, + MLX5_CAP_ODP, + MLX5_CAP_ATOMIC, + MLX5_CAP_ROCE, + MLX5_CAP_IPOIB_OFFLOADS, + MLX5_CAP_FLOW_TABLE, + MLX5_CAP_ESWITCH_FLOW_TABLE, + MLX5_CAP_ESWITCH, + MLX5_CAP_VECTOR_CALC, + MLX5_CAP_QOS, + MLX5_CAP_DEBUG, + MLX5_CAP_DEV_MEM, + MLX5_CAP_DEV_EVENT, + MLX5_CAP_TLS, + MLX5_CAP_VDPA_EMULATION, + MLX5_CAP_IPSEC, +}; + +static void mlx5_hca_caps_free(struct mlx5_core_dev *dev) +{ + int type; + int i; + + for (i = 0; i < ARRAY_SIZE(types); i++) { + type = types[i]; + kfree(dev->caps.hca[type]); + } +} + +static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev) +{ + struct mlx5_hca_cap *cap; + int type; + int i; + + for (i = 0; i < ARRAY_SIZE(types); i++) { + cap = kzalloc(sizeof(*cap), GFP_KERNEL); + if (!cap) + goto err; + type = types[i]; + dev->caps.hca[type] = cap; + } + + return 0; + +err: + mlx5_hca_caps_free(dev); + return -ENOMEM; +} + int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) { struct mlx5_priv *priv = &dev->priv; @@ -1399,6 +1434,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) mutex_init(&priv->pgdir_mutex); INIT_LIST_HEAD(&priv->pgdir_list); + priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); priv->dbg_root = debugfs_create_dir(dev_name(dev->device), mlx5_debugfs_root); INIT_LIST_HEAD(&priv->traps); @@ -1415,8 +1451,14 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) if (err) goto err_adev_init; + err = mlx5_hca_caps_alloc(dev); + if (err) + goto err_hca_caps; + return 0; +err_hca_caps: + mlx5_adev_cleanup(dev); err_adev_init: mlx5_pagealloc_cleanup(dev); err_pagealloc_init: @@ -1435,6 +1477,7 @@ void mlx5_mdev_uninit(struct mlx5_core_dev *dev) { struct mlx5_priv *priv = &dev->priv; + mlx5_hca_caps_free(dev); mlx5_adev_cleanup(dev); mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); @@ -1452,7 +1495,7 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) struct devlink *devlink; int err; - devlink = mlx5_devlink_alloc(); + devlink = mlx5_devlink_alloc(&pdev->dev); if (!devlink) { dev_err(&pdev->dev, "devlink alloc failed\n"); return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index da365b8f0141..230eab7e3bc9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -168,6 +168,8 @@ void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_add_mdev(struct mlx5_core_dev *dev); void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev); +void mlx5_lag_disable_change(struct mlx5_core_dev *dev); +void mlx5_lag_enable_change(struct mlx5_core_dev *dev); int mlx5_events_init(struct mlx5_core_dev *dev); void mlx5_events_cleanup(struct mlx5_core_dev *dev); @@ -275,4 +277,9 @@ static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); } + +bool mlx5_eth_supported(struct mlx5_core_dev *dev); +bool mlx5_rdma_supported(struct mlx5_core_dev *dev); +bool mlx5_vnet_supported(struct mlx5_core_dev *dev); + #endif /* __MLX5_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 3465b363fc2f..c79a10b3454d 100644 --- 
a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -18,7 +18,7 @@ #define MLX5_SFS_PER_CTRL_IRQ 64 #define MLX5_IRQ_CTRL_SF_MAX 8 -/* min num of vectores for SFs to be enabled */ +/* min num of vectors for SFs to be enabled */ #define MLX5_IRQ_VEC_COMP_BASE_SF 2 #define MLX5_EQ_SHARE_IRQ_MAX_COMP (8) @@ -28,13 +28,13 @@ #define MLX5_EQ_REFS_PER_IRQ (2) struct mlx5_irq { - u32 index; struct atomic_notifier_head nh; cpumask_var_t mask; char name[MLX5_MAX_IRQ_NAME]; - struct kref kref; - int irqn; struct mlx5_irq_pool *pool; + int refcount; + u32 index; + int irqn; }; struct mlx5_irq_pool { @@ -138,9 +138,8 @@ out: return ret; } -static void irq_release(struct kref *kref) +static void irq_release(struct mlx5_irq *irq) { - struct mlx5_irq *irq = container_of(kref, struct mlx5_irq, kref); struct mlx5_irq_pool *pool = irq->pool; xa_erase(&pool->irqs, irq->index); @@ -159,10 +158,31 @@ static void irq_put(struct mlx5_irq *irq) struct mlx5_irq_pool *pool = irq->pool; mutex_lock(&pool->lock); - kref_put(&irq->kref, irq_release); + irq->refcount--; + if (!irq->refcount) + irq_release(irq); mutex_unlock(&pool->lock); } +static int irq_get_locked(struct mlx5_irq *irq) +{ + lockdep_assert_held(&irq->pool->lock); + if (WARN_ON_ONCE(!irq->refcount)) + return 0; + irq->refcount++; + return 1; +} + +static int irq_get(struct mlx5_irq *irq) +{ + int err; + + mutex_lock(&irq->pool->lock); + err = irq_get_locked(irq); + mutex_unlock(&irq->pool->lock); + return err; +} + static irqreturn_t irq_int_handler(int irq, void *nh) { atomic_notifier_call_chain(nh, 0, NULL); @@ -215,7 +235,7 @@ static struct mlx5_irq *irq_request(struct mlx5_irq_pool *pool, int i) goto err_cpumask; } irq->pool = pool; - kref_init(&irq->kref); + irq->refcount = 1; irq->index = i; err = xa_err(xa_store(&pool->irqs, irq->index, irq, GFP_KERNEL)); if (err) { @@ -235,18 +255,18 @@ err_req_irq: int mlx5_irq_attach_nb(struct mlx5_irq *irq, struct notifier_block *nb) { - int err; + int ret; - err = kref_get_unless_zero(&irq->kref); - if (WARN_ON_ONCE(!err)) + ret = irq_get(irq); + if (!ret) /* Something very bad happens here, we are enabling EQ * on non-existing IRQ. 
*/ return -ENOENT; - err = atomic_notifier_chain_register(&irq->nh, nb); - if (err) + ret = atomic_notifier_chain_register(&irq->nh, nb); + if (ret) irq_put(irq); - return err; + return ret; } int mlx5_irq_detach_nb(struct mlx5_irq *irq, struct notifier_block *nb) @@ -304,10 +324,9 @@ static struct mlx5_irq *irq_pool_find_least_loaded(struct mlx5_irq_pool *pool, xa_for_each_range(&pool->irqs, index, iter, start, end) { if (!cpumask_equal(iter->mask, affinity)) continue; - if (kref_read(&iter->kref) < pool->min_threshold) + if (iter->refcount < pool->min_threshold) return iter; - if (!irq || kref_read(&iter->kref) < - kref_read(&irq->kref)) + if (!irq || iter->refcount < irq->refcount) irq = iter; } return irq; @@ -322,7 +341,7 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool, mutex_lock(&pool->lock); least_loaded_irq = irq_pool_find_least_loaded(pool, affinity); if (least_loaded_irq && - kref_read(&least_loaded_irq->kref) < pool->min_threshold) + least_loaded_irq->refcount < pool->min_threshold) goto out; new_irq = irq_pool_create_irq(pool, affinity); if (IS_ERR(new_irq)) { @@ -340,11 +359,11 @@ static struct mlx5_irq *irq_pool_request_affinity(struct mlx5_irq_pool *pool, least_loaded_irq = new_irq; goto unlock; out: - kref_get(&least_loaded_irq->kref); - if (kref_read(&least_loaded_irq->kref) > pool->max_threshold) + irq_get_locked(least_loaded_irq); + if (least_loaded_irq->refcount > pool->max_threshold) mlx5_core_dbg(pool->dev, "IRQ %u overloaded, pool_name: %s, %u EQs on this irq\n", least_loaded_irq->irqn, pool->name, - kref_read(&least_loaded_irq->kref) / MLX5_EQ_REFS_PER_IRQ); + least_loaded_irq->refcount / MLX5_EQ_REFS_PER_IRQ); unlock: mutex_unlock(&pool->lock); return least_loaded_irq; @@ -360,7 +379,7 @@ irq_pool_request_vector(struct mlx5_irq_pool *pool, int vecidx, mutex_lock(&pool->lock); irq = xa_load(&pool->irqs, vecidx); if (irq) { - kref_get(&irq->kref); + irq_get_locked(irq); goto unlock; } irq = irq_request(pool, vecidx); @@ -427,7 +446,7 @@ out: return irq; mlx5_core_dbg(dev, "irq %u mapped to cpu %*pbl, %u EQs on this irq\n", irq->irqn, cpumask_pr_args(affinity), - kref_read(&irq->kref) / MLX5_EQ_REFS_PER_IRQ); + irq->refcount / MLX5_EQ_REFS_PER_IRQ); return irq; } @@ -459,8 +478,12 @@ static void irq_pool_free(struct mlx5_irq_pool *pool) struct mlx5_irq *irq; unsigned long index; + /* There are cases in which we are destroying the irq_table before + * freeing all the IRQs, fast teardown for example. Hence, free the irqs + * which might not have been freed. + */ xa_for_each(&pool->irqs, index, irq) - irq_release(&irq->kref); + irq_release(irq); xa_destroy(&pool->irqs); mutex_destroy(&pool->lock); kvfree(pool); @@ -483,7 +506,7 @@ static int irq_pools_init(struct mlx5_core_dev *dev, int sf_vec, int pf_vec) if (!mlx5_sf_max_functions(dev)) return 0; if (sf_vec < MLX5_IRQ_VEC_COMP_BASE_SF) { - mlx5_core_err(dev, "Not enough IRQs for SFs. SF may run at lower performance\n"); + mlx5_core_dbg(dev, "Not enough IRQs for SFs. SF may run at lower performance\n"); return 0; } @@ -601,7 +624,7 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) return; /* There are cases where IRQs still will be in used when we reaching - * to here. Hence, making sure all the irqs are realeased. + * to here. Hence, making sure all the irqs are released.
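The pci_irq.c hunks above replace the per-IRQ kref with a plain integer refcount that is only read or written under pool->lock (note the lockdep_assert_held() in irq_get_locked()), which is why irq_pool_find_least_loaded() can compare iter->refcount directly. The following userspace sketch mirrors that pattern only for illustration: pthread_mutex stands in for the kernel mutex, and struct sketch_irq / struct irq_pool are invented stand-ins, not the driver's real structures.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct irq_pool {
	pthread_mutex_t lock;
};

struct sketch_irq {
	struct irq_pool *pool;
	int refcount;		/* protected by pool->lock; replaces the old kref */
};

static void irq_release(struct sketch_irq *irq)
{
	/* Last reference gone; the driver also frees the vector, cpumask
	 * and xarray slot at this point. */
	free(irq);
}

/* Caller must hold pool->lock, as irq_get_locked() in the patch expects. */
static int irq_get_locked(struct sketch_irq *irq)
{
	if (!irq->refcount)	/* already released: refuse to revive it */
		return 0;
	irq->refcount++;
	return 1;
}

static void irq_put(struct sketch_irq *irq)
{
	struct irq_pool *pool = irq->pool;

	pthread_mutex_lock(&pool->lock);
	if (!--irq->refcount)
		irq_release(irq);
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct irq_pool pool;
	struct sketch_irq *irq = calloc(1, sizeof(*irq));

	pthread_mutex_init(&pool.lock, NULL);
	irq->pool = &pool;
	irq->refcount = 1;	/* creation reference, as irq_request() sets */

	pthread_mutex_lock(&pool.lock);
	printf("get: %d\n", irq_get_locked(irq));
	pthread_mutex_unlock(&pool.lock);

	irq_put(irq);		/* drop the reference taken above */
	irq_put(irq);		/* drop the creation reference; frees irq */

	pthread_mutex_destroy(&pool.lock);
	return 0;
}

Since every access already happens with pool->lock held, the separate atomic bookkeeping a kref provides buys nothing here, which appears to be the motivation for the conversion.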
*/ irq_pools_destroy(table); pci_free_irq_vectors(dev->pdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c index fa0288afc0dd..871c2fbe18d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/dev.c @@ -39,7 +39,7 @@ static ssize_t sfnum_show(struct device *dev, struct device_attribute *attr, cha struct auxiliary_device *adev = container_of(dev, struct auxiliary_device, dev); struct mlx5_sf_dev *sf_dev = container_of(adev, struct mlx5_sf_dev, adev); - return scnprintf(buf, PAGE_SIZE, "%u\n", sf_dev->sfnum); + return sysfs_emit(buf, "%u\n", sf_dev->sfnum); } static DEVICE_ATTR_RO(sfnum); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index 42c8ee03fe3e..052f48068dc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -14,7 +14,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia struct devlink *devlink; int err; - devlink = mlx5_devlink_alloc(); + devlink = mlx5_devlink_alloc(&adev->dev); if (!devlink) return -ENOMEM; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c index 1be048769309..13891fdc607e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/devlink.c @@ -164,12 +164,12 @@ static bool mlx5_sf_is_active(const struct mlx5_sf *sf) return sf->hw_state == MLX5_VHCA_STATE_ACTIVE || sf->hw_state == MLX5_VHCA_STATE_IN_USE; } -int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, enum devlink_port_fn_state *state, enum devlink_port_fn_opstate *opstate, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); struct mlx5_sf_table *table; struct mlx5_sf *sf; int err = 0; @@ -248,11 +248,11 @@ out: return err; } -int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack) { - struct mlx5_core_dev *dev = devlink_priv(devlink); + struct mlx5_core_dev *dev = devlink_priv(dl_port->devlink); struct mlx5_sf_table *table; struct mlx5_sf *sf; int err; @@ -476,7 +476,7 @@ static void mlx5_sf_table_disable(struct mlx5_sf_table *table) return; /* Balances with refcount_set; drop the reference so that new user cmd cannot start - * and new vhca event handler cannnot run. + * and new vhca event handler cannot run. 
*/ mlx5_sf_table_put(table); wait_for_completion(&table->disable_complete); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h index 81ce13b19ee8..3a480e06ecc0 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/sf.h @@ -24,11 +24,11 @@ int mlx5_devlink_sf_port_new(struct devlink *devlink, unsigned int *new_port_index); int mlx5_devlink_sf_port_del(struct devlink *devlink, unsigned int port_index, struct netlink_ext_ack *extack); -int mlx5_devlink_sf_port_fn_state_get(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_get(struct devlink_port *dl_port, enum devlink_port_fn_state *state, enum devlink_port_fn_opstate *opstate, struct netlink_ext_ack *extack); -int mlx5_devlink_sf_port_fn_state_set(struct devlink *devlink, struct devlink_port *dl_port, +int mlx5_devlink_sf_port_fn_state_set(struct devlink_port *dl_port, enum devlink_port_fn_state state, struct netlink_ext_ack *extack); #else diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c index 6475ba35cf6b..a5b9f65db23c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c @@ -18,12 +18,39 @@ enum dr_action_valid_state { DR_ACTION_STATE_ENCAP, DR_ACTION_STATE_DECAP, DR_ACTION_STATE_MODIFY_HDR, - DR_ACTION_STATE_MODIFY_VLAN, + DR_ACTION_STATE_POP_VLAN, + DR_ACTION_STATE_PUSH_VLAN, DR_ACTION_STATE_NON_TERM, DR_ACTION_STATE_TERM, DR_ACTION_STATE_MAX, }; +static const char * const action_type_to_str[] = { + [DR_ACTION_TYP_TNL_L2_TO_L2] = "DR_ACTION_TYP_TNL_L2_TO_L2", + [DR_ACTION_TYP_L2_TO_TNL_L2] = "DR_ACTION_TYP_L2_TO_TNL_L2", + [DR_ACTION_TYP_TNL_L3_TO_L2] = "DR_ACTION_TYP_TNL_L3_TO_L2", + [DR_ACTION_TYP_L2_TO_TNL_L3] = "DR_ACTION_TYP_L2_TO_TNL_L3", + [DR_ACTION_TYP_DROP] = "DR_ACTION_TYP_DROP", + [DR_ACTION_TYP_QP] = "DR_ACTION_TYP_QP", + [DR_ACTION_TYP_FT] = "DR_ACTION_TYP_FT", + [DR_ACTION_TYP_CTR] = "DR_ACTION_TYP_CTR", + [DR_ACTION_TYP_TAG] = "DR_ACTION_TYP_TAG", + [DR_ACTION_TYP_MODIFY_HDR] = "DR_ACTION_TYP_MODIFY_HDR", + [DR_ACTION_TYP_VPORT] = "DR_ACTION_TYP_VPORT", + [DR_ACTION_TYP_POP_VLAN] = "DR_ACTION_TYP_POP_VLAN", + [DR_ACTION_TYP_PUSH_VLAN] = "DR_ACTION_TYP_PUSH_VLAN", + [DR_ACTION_TYP_INSERT_HDR] = "DR_ACTION_TYP_INSERT_HDR", + [DR_ACTION_TYP_REMOVE_HDR] = "DR_ACTION_TYP_REMOVE_HDR", + [DR_ACTION_TYP_MAX] = "DR_ACTION_UNKNOWN", +}; + +static const char *dr_action_id_to_str(enum mlx5dr_action_type action_id) +{ + if (action_id > DR_ACTION_TYP_MAX) + action_id = DR_ACTION_TYP_MAX; + return action_type_to_str[action_id]; +} + static const enum dr_action_valid_state next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] = { [DR_ACTION_DOMAIN_NIC_INGRESS] = { @@ -39,8 +66,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_DECAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -53,7 +82,8 @@ 
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -73,20 +103,31 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, }, + [DR_ACTION_STATE_PUSH_VLAN] = { + [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_TAG] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + }, [DR_ACTION_STATE_NON_TERM] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_QP] = DR_ACTION_STATE_TERM, @@ -99,8 +140,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -115,8 +158,16 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + }, + [DR_ACTION_STATE_DECAP] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, }, 
[DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -132,14 +183,25 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + }, + [DR_ACTION_STATE_POP_VLAN] = { + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, @@ -152,8 +214,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, }, [DR_ACTION_STATE_TERM] = { [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_TERM, @@ -170,8 +234,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_DECAP] = { @@ -180,11 +246,12 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -203,13 +270,26 @@ 
next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_POP_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + }, + [DR_ACTION_STATE_PUSH_VLAN] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, @@ -226,8 +306,10 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, - [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { @@ -244,8 +326,17 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_DECAP] = { + [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_DECAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_ENCAP] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, @@ -262,15 +353,27 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, + }, + [DR_ACTION_STATE_POP_VLAN] = { + [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_POP_VLAN, + 
[DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_MODIFY_HDR] = DR_ACTION_STATE_MODIFY_HDR, + [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, + [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, - [DR_ACTION_STATE_MODIFY_VLAN] = { + [DR_ACTION_STATE_PUSH_VLAN] = { [DR_ACTION_TYP_DROP] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_FT] = DR_ACTION_STATE_TERM, [DR_ACTION_TYP_SAMPLER] = DR_ACTION_STATE_TERM, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, - [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_CTR] = DR_ACTION_STATE_PUSH_VLAN, [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, @@ -285,7 +388,9 @@ next_action_state[DR_ACTION_DOMAIN_MAX][DR_ACTION_STATE_MAX][DR_ACTION_TYP_MAX] [DR_ACTION_TYP_L2_TO_TNL_L2] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_L2_TO_TNL_L3] = DR_ACTION_STATE_ENCAP, [DR_ACTION_TYP_INSERT_HDR] = DR_ACTION_STATE_ENCAP, - [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_MODIFY_VLAN, + [DR_ACTION_TYP_REMOVE_HDR] = DR_ACTION_STATE_DECAP, + [DR_ACTION_TYP_PUSH_VLAN] = DR_ACTION_STATE_PUSH_VLAN, + [DR_ACTION_TYP_POP_VLAN] = DR_ACTION_STATE_POP_VLAN, [DR_ACTION_TYP_VPORT] = DR_ACTION_STATE_TERM, }, [DR_ACTION_STATE_TERM] = { @@ -314,6 +419,9 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type case DR_ACTION_REFORMAT_TYP_INSERT_HDR: *action_type = DR_ACTION_TYP_INSERT_HDR; break; + case DR_ACTION_REFORMAT_TYP_REMOVE_HDR: + *action_type = DR_ACTION_TYP_REMOVE_HDR; + break; default: return -EINVAL; } @@ -326,7 +434,7 @@ dr_action_reformat_to_action_type(enum mlx5dr_action_reformat_type reformat_type * the new size of the STEs array, rule with actions. 
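The next_action_state table extended above is indexed by [domain][current state][action type], and dr_action_validate_and_get_next_state() (called further down in mlx5dr_actions_build_ste_arr()) uses it to reject illegal action orderings. The short, self-contained sketch below shows the same table-driven validation idea; its enums and transitions are invented and much smaller than the driver's, and it assumes the convention the designated initializers suggest, namely that a slot left at zero marks a transition that is not allowed.

#include <stdio.h>

enum state  { ST_ERR, ST_NO_ACTION, ST_ENCAP, ST_TERM, ST_MAX };
enum action { ACT_ENCAP_L2, ACT_CTR, ACT_FT, ACT_MAX };

/* A slot left at 0 (ST_ERR) means "this action may not follow this state". */
static const enum state next_state[ST_MAX][ACT_MAX] = {
	[ST_NO_ACTION] = {
		[ACT_ENCAP_L2] = ST_ENCAP,
		[ACT_CTR]      = ST_NO_ACTION,
		[ACT_FT]       = ST_TERM,
	},
	[ST_ENCAP] = {
		[ACT_CTR] = ST_ENCAP,
		[ACT_FT]  = ST_TERM,
	},
	[ST_TERM] = {
		[ACT_CTR] = ST_TERM,
	},
};

/* Returns 0 when the ordered action list is legal, -1 otherwise. */
static int validate_sequence(const enum action *acts, int n)
{
	enum state s = ST_NO_ACTION;
	int i;

	for (i = 0; i < n; i++) {
		s = next_state[s][acts[i]];
		if (s == ST_ERR)
			return -1;
	}
	return 0;
}

int main(void)
{
	enum action good[] = { ACT_CTR, ACT_ENCAP_L2, ACT_FT };
	enum action bad[]  = { ACT_FT, ACT_ENCAP_L2 };	/* encap after a terminator */

	printf("good: %d\n", validate_sequence(good, 3));	/* prints 0 */
	printf("bad:  %d\n", validate_sequence(bad, 2));	/* prints -1 */
	return 0;
}

Splitting the old MODIFY_VLAN state into separate POP_VLAN and PUSH_VLAN states, as the hunks above do, simply means each VLAN direction gets its own row of allowed followers in this kind of table.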
*/ static void dr_actions_apply(struct mlx5dr_domain *dmn, - enum mlx5dr_ste_entry_type ste_type, + enum mlx5dr_domain_nic_type nic_type, u8 *action_type_set, u8 *last_ste, struct mlx5dr_ste_actions_attr *attr, @@ -335,7 +443,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; u32 added_stes = 0; - if (ste_type == MLX5DR_STE_TYPE_RX) + if (nic_type == DR_DOMAIN_NIC_TYPE_RX) mlx5dr_ste_set_actions_rx(ste_ctx, dmn, action_type_set, last_ste, attr, &added_stes); else @@ -347,7 +455,7 @@ static void dr_actions_apply(struct mlx5dr_domain *dmn, static enum dr_action_domain dr_action_get_action_domain(enum mlx5dr_domain_type domain, - enum mlx5dr_ste_entry_type ste_type) + enum mlx5dr_domain_nic_type nic_type) { switch (domain) { case MLX5DR_DOMAIN_TYPE_NIC_RX: @@ -355,7 +463,7 @@ dr_action_get_action_domain(enum mlx5dr_domain_type domain, case MLX5DR_DOMAIN_TYPE_NIC_TX: return DR_ACTION_DOMAIN_NIC_EGRESS; case MLX5DR_DOMAIN_TYPE_FDB: - if (ste_type == MLX5DR_STE_TYPE_RX) + if (nic_type == DR_DOMAIN_NIC_TYPE_RX) return DR_ACTION_DOMAIN_FDB_INGRESS; return DR_ACTION_DOMAIN_FDB_EGRESS; default: @@ -421,6 +529,18 @@ static int dr_action_handle_cs_recalc(struct mlx5dr_domain *dmn, return 0; } +static void dr_action_print_sequence(struct mlx5dr_domain *dmn, + struct mlx5dr_action *actions[], + int last_idx) +{ + int i; + + for (i = 0; i <= last_idx; i++) + mlx5dr_err(dmn, "< %s (%d) > ", + dr_action_id_to_str(actions[i]->action_type), + actions[i]->action_type); +} + #define WITH_VLAN_NUM_HW_ACTIONS 6 int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, @@ -431,7 +551,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, u32 *new_hw_ste_arr_sz) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; - bool rx_rule = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + bool rx_rule = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 action_type_set[DR_ACTION_TYP_MAX] = {}; struct mlx5dr_ste_actions_attr attr = {}; @@ -445,7 +565,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.gvmi = dmn->info.caps.gvmi; attr.hit_gvmi = dmn->info.caps.gvmi; attr.final_icm_addr = nic_dmn->default_icm_addr; - action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->ste_type); + action_domain = dr_action_get_action_domain(dmn->type, nic_dmn->type); for (i = 0; i < num_actions; i++) { struct mlx5dr_action_dest_tbl *dest_tbl; @@ -467,11 +587,11 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (dest_tbl->tbl->dmn != dmn) { mlx5dr_err(dmn, "Destination table belongs to a different domain\n"); - goto out_invalid_arg; + return -EINVAL; } if (dest_tbl->tbl->level <= matcher->tbl->level) { - mlx5_core_warn_once(dmn->mdev, - "Connecting table to a lower/same level destination table\n"); + mlx5_core_dbg_once(dmn->mdev, + "Connecting table to a lower/same level destination table\n"); mlx5dr_dbg(dmn, "Connecting table at level %d to a destination table at level %d\n", matcher->tbl->level, @@ -509,7 +629,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, break; case DR_ACTION_TYP_QP: mlx5dr_info(dmn, "Domain doesn't support QP\n"); - goto out_invalid_arg; + return -EOPNOTSUPP; case DR_ACTION_TYP_CTR: attr.ctr_id = action->ctr->ctr_id + action->ctr->offeset; @@ -536,7 +656,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, if (rx_rule && !(dmn->ste_ctx->actions_caps & DR_STE_CTX_ACTION_CAP_RX_ENCAP)) { 
mlx5dr_info(dmn, "Device doesn't support Encap on RX\n"); - goto out_invalid_arg; + return -EOPNOTSUPP; } attr.reformat.size = action->reformat->size; attr.reformat.id = action->reformat->id; @@ -549,48 +669,66 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, attr.hit_gvmi = action->vport->caps->vhca_gvmi; dest_action = action; if (rx_rule) { - /* Loopback on WIRE vport is not supported */ - if (action->vport->caps->num == WIRE_PORT) - goto out_invalid_arg; - + if (action->vport->caps->num == WIRE_PORT) { + mlx5dr_dbg(dmn, "Device doesn't support Loopback on WIRE vport\n"); + return -EOPNOTSUPP; + } attr.final_icm_addr = action->vport->caps->icm_address_rx; } else { attr.final_icm_addr = action->vport->caps->icm_address_tx; } break; case DR_ACTION_TYP_POP_VLAN: + if (!rx_rule && !(dmn->ste_ctx->actions_caps & + DR_STE_CTX_ACTION_CAP_TX_POP)) { + mlx5dr_dbg(dmn, "Device doesn't support POP VLAN action on TX\n"); + return -EOPNOTSUPP; + } + max_actions_type = MLX5DR_MAX_VLANS; attr.vlans.count++; break; case DR_ACTION_TYP_PUSH_VLAN: + if (rx_rule && !(dmn->ste_ctx->actions_caps & + DR_STE_CTX_ACTION_CAP_RX_PUSH)) { + mlx5dr_dbg(dmn, "Device doesn't support PUSH VLAN action on RX\n"); + return -EOPNOTSUPP; + } + max_actions_type = MLX5DR_MAX_VLANS; - if (attr.vlans.count == MLX5DR_MAX_VLANS) + if (attr.vlans.count == MLX5DR_MAX_VLANS) { + mlx5dr_dbg(dmn, "Max VLAN push/pop count exceeded\n"); return -EINVAL; + } attr.vlans.headers[attr.vlans.count++] = action->push_vlan->vlan_hdr; break; case DR_ACTION_TYP_INSERT_HDR: + case DR_ACTION_TYP_REMOVE_HDR: attr.reformat.size = action->reformat->size; attr.reformat.id = action->reformat->id; attr.reformat.param_0 = action->reformat->param_0; attr.reformat.param_1 = action->reformat->param_1; break; default: - goto out_invalid_arg; + mlx5dr_err(dmn, "Unsupported action type %d\n", action_type); + return -EINVAL; } /* Check action duplication */ if (++action_type_set[action_type] > max_actions_type) { mlx5dr_err(dmn, "Action type %d supports only max %d time(s)\n", action_type, max_actions_type); - goto out_invalid_arg; + return -EINVAL; } /* Check action state machine is valid */ if (dr_action_validate_and_get_next_state(action_domain, action_type, &state)) { - mlx5dr_err(dmn, "Invalid action sequence provided\n"); + mlx5dr_err(dmn, "Invalid action (gvmi: %d, is_rx: %d) sequence provided:", + attr.gvmi, rx_rule); + dr_action_print_sequence(dmn, actions, i); return -EOPNOTSUPP; } } @@ -614,16 +752,13 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher, } dr_actions_apply(dmn, - nic_dmn->ste_type, + nic_dmn->type, action_type_set, last_ste, &attr, new_hw_ste_arr_sz); return 0; - -out_invalid_arg: - return -EINVAL; } static unsigned int action_size[DR_ACTION_TYP_MAX] = { @@ -638,6 +773,7 @@ static unsigned int action_size[DR_ACTION_TYP_MAX] = { [DR_ACTION_TYP_VPORT] = sizeof(struct mlx5dr_action_vport), [DR_ACTION_TYP_PUSH_VLAN] = sizeof(struct mlx5dr_action_push_vlan), [DR_ACTION_TYP_INSERT_HDR] = sizeof(struct mlx5dr_action_reformat), + [DR_ACTION_TYP_REMOVE_HDR] = sizeof(struct mlx5dr_action_reformat), [DR_ACTION_TYP_SAMPLER] = sizeof(struct mlx5dr_action_sampler), }; @@ -709,7 +845,8 @@ dec_ref: struct mlx5dr_action * mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action_dest *dests, - u32 num_of_dests) + u32 num_of_dests, + bool ignore_flow_level) { struct mlx5dr_cmd_flow_destination_hw_info *hw_dests; struct mlx5dr_action **ref_actions; @@ -776,7 +913,8 @@ 
mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, num_of_dests, reformat_req, &action->dest_tbl->fw_tbl.id, - &action->dest_tbl->fw_tbl.group_id); + &action->dest_tbl->fw_tbl.group_id, + ignore_flow_level); if (ret) goto free_action; @@ -884,11 +1022,23 @@ dr_action_verify_reformat_params(enum mlx5dr_action_type reformat_type, size_t data_sz, void *data) { - if ((!data && data_sz) || (data && !data_sz) || - ((reformat_param_0 || reformat_param_1) && - reformat_type != DR_ACTION_TYP_INSERT_HDR) || - reformat_type > DR_ACTION_TYP_INSERT_HDR) { - mlx5dr_dbg(dmn, "Invalid reformat parameter!\n"); + if (reformat_type == DR_ACTION_TYP_INSERT_HDR) { + if ((!data && data_sz) || (data && !data_sz) || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_size) < data_sz || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_insert_offset) < reformat_param_1) { + mlx5dr_dbg(dmn, "Invalid reformat parameters for INSERT_HDR\n"); + goto out_err; + } + } else if (reformat_type == DR_ACTION_TYP_REMOVE_HDR) { + if (data || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_size) < data_sz || + MLX5_CAP_GEN_2(dmn->mdev, max_reformat_remove_offset) < reformat_param_1) { + mlx5dr_dbg(dmn, "Invalid reformat parameters for REMOVE_HDR\n"); + goto out_err; + } + } else if (reformat_param_0 || reformat_param_1 || + reformat_type > DR_ACTION_TYP_REMOVE_HDR) { + mlx5dr_dbg(dmn, "Invalid reformat parameters\n"); goto out_err; } @@ -987,7 +1137,6 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, return 0; } case DR_ACTION_TYP_INSERT_HDR: - { ret = mlx5dr_cmd_create_reformat_ctx(dmn->mdev, MLX5_REFORMAT_TYPE_INSERT_HDR, reformat_param_0, @@ -1002,7 +1151,12 @@ dr_action_create_reformat_action(struct mlx5dr_domain *dmn, action->reformat->param_0 = reformat_param_0; action->reformat->param_1 = reformat_param_1; return 0; - } + case DR_ACTION_TYP_REMOVE_HDR: + action->reformat->id = 0; + action->reformat->size = data_sz; + action->reformat->param_0 = reformat_param_0; + action->reformat->param_1 = reformat_param_1; + return 0; default: mlx5dr_info(dmn, "Reformat type is not supported %d\n", action->action_type); return -EINVAL; @@ -1658,6 +1812,7 @@ int mlx5dr_action_destroy(struct mlx5dr_action *action) } break; case DR_ACTION_TYP_TNL_L2_TO_L2: + case DR_ACTION_TYP_REMOVE_HDR: refcount_dec(&action->reformat->dmn->refcount); break; case DR_ACTION_TYP_TNL_L3_TO_L2: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index 54e1f5438bbe..56307283bf9b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -655,6 +655,7 @@ int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(set_fte_in, in, table_type, ft->type); MLX5_SET(set_fte_in, in, table_id, ft->id); MLX5_SET(set_fte_in, in, flow_index, fte->index); + MLX5_SET(set_fte_in, in, ignore_flow_level, fte->ignore_flow_level); if (ft->vport) { MLX5_SET(set_fte_in, in, vport_number, ft->vport); MLX5_SET(set_fte_in, in, other_vport, 1); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c index 7091b1be84ef..0fe159809ba1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_domain.c @@ -245,7 +245,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -ENOTSUPP; dmn->info.supp_sw_steering = true; - dmn->info.rx.ste_type = 
MLX5DR_STE_TYPE_RX; + dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; dmn->info.rx.default_icm_addr = dmn->info.caps.nic_rx_drop_address; dmn->info.rx.drop_icm_addr = dmn->info.caps.nic_rx_drop_address; break; @@ -254,7 +254,7 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, return -ENOTSUPP; dmn->info.supp_sw_steering = true; - dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; dmn->info.tx.default_icm_addr = dmn->info.caps.nic_tx_allow_address; dmn->info.tx.drop_icm_addr = dmn->info.caps.nic_tx_drop_address; break; @@ -265,8 +265,8 @@ static int dr_domain_caps_init(struct mlx5_core_dev *mdev, if (!DR_DOMAIN_SW_STEERING_SUPPORTED(dmn, fdb)) return -ENOTSUPP; - dmn->info.rx.ste_type = MLX5DR_STE_TYPE_RX; - dmn->info.tx.ste_type = MLX5DR_STE_TYPE_TX; + dmn->info.rx.type = DR_DOMAIN_NIC_TYPE_RX; + dmn->info.tx.type = DR_DOMAIN_NIC_TYPE_TX; vport_cap = mlx5dr_get_vport_cap(&dmn->info.caps, 0); if (!vport_cap) { mlx5dr_err(dmn, "Failed to get esw manager vport\n"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c index 7ccfd40586ce..0d6f86eb248b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_fw.c @@ -103,7 +103,8 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, int num_dest, bool reformat_req, u32 *tbl_id, - u32 *group_id) + u32 *group_id, + bool ignore_flow_level) { struct mlx5dr_cmd_create_flow_table_attr ft_attr = {}; struct mlx5dr_cmd_fte_info fte_info = {}; @@ -137,6 +138,7 @@ int mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, fte_info.dests_size = num_dest; fte_info.val = val; fte_info.dest_arr = dest; + fte_info.ignore_flow_level = ignore_flow_level; ret = mlx5dr_cmd_set_fte(dmn->mdev, 0, 0, &ft_info, *group_id, &fte_info); if (ret) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c index 6f6191d1d5a6..b5409cc021d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_matcher.c @@ -396,13 +396,14 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_match_param mask = {}; + bool allow_empty_match = false; struct mlx5dr_ste_build *sb; bool inner, rx; int idx = 0; int ret, i; sb = nic_matcher->ste_builder_arr[outer_ipv][inner_ipv]; - rx = nic_dmn->ste_type == MLX5DR_STE_TYPE_RX; + rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; /* Create a temporary mask to track and clear used mask fields */ if (matcher->match_criteria & DR_MATCHER_CRITERIA_OUTER) @@ -428,6 +429,16 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, if (ret) return ret; + /* Optimize RX pipe by reducing source port match, since + * the FDB RX part is connected only to the wire. 
+ */ + if (dmn->type == MLX5DR_DOMAIN_TYPE_FDB && + rx && mask.misc.source_port) { + mask.misc.source_port = 0; + mask.misc.source_eswitch_owner_vhca_id = 0; + allow_empty_match = true; + } + /* Outer */ if (matcher->match_criteria & (DR_MATCHER_CRITERIA_OUTER | DR_MATCHER_CRITERIA_MISC | @@ -619,7 +630,8 @@ static int dr_matcher_set_ste_builders(struct mlx5dr_matcher *matcher, } /* Empty matcher, takes all */ - if (matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) + if ((!idx && allow_empty_match) || + matcher->match_criteria == DR_MATCHER_CRITERIA_EMPTY) mlx5dr_ste_build_empty_always_hit(&sb[idx++], rx); if (idx == 0) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index 43356fad53de..aca80efc28fa 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -81,6 +81,7 @@ dr_rule_create_collision_entry(struct mlx5dr_matcher *matcher, } ste->ste_chain_location = orig_ste->ste_chain_location; + ste->htbl->pointing_ste = orig_ste->htbl->pointing_ste; /* In collision entry, all members share the same miss_list_head */ ste->htbl->miss_list = mlx5dr_ste_get_miss_list(orig_ste); @@ -185,6 +186,9 @@ dr_rule_rehash_handle_collision(struct mlx5dr_matcher *matcher, if (!new_ste) return NULL; + /* Update collision pointing STE */ + new_ste->htbl->pointing_ste = col_ste->htbl->pointing_ste; + /* In collision entry, all members share the same miss_list_head */ new_ste->htbl->miss_list = mlx5dr_ste_get_miss_list(col_ste); @@ -212,7 +216,7 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, new_ste->next_htbl = cur_ste->next_htbl; new_ste->ste_chain_location = cur_ste->ste_chain_location; - if (!mlx5dr_ste_is_last_in_rule(nic_matcher, new_ste->ste_chain_location)) + if (new_ste->next_htbl) new_ste->next_htbl->pointing_ste = new_ste; /* We need to copy the refcount since this ste @@ -220,10 +224,8 @@ static void dr_rule_rehash_copy_ste_ctrl(struct mlx5dr_matcher *matcher, */ new_ste->refcount = cur_ste->refcount; - /* Link old STEs rule_mem list to the new ste */ - mlx5dr_rule_update_rule_member(cur_ste, new_ste); - INIT_LIST_HEAD(&new_ste->rule_list); - list_splice_tail_init(&cur_ste->rule_list, &new_ste->rule_list); + /* Link old STEs rule to the new ste */ + mlx5dr_rule_set_last_member(cur_ste->rule_rx_tx, new_ste, false); } static struct mlx5dr_ste * @@ -404,7 +406,7 @@ dr_rule_rehash_htbl(struct mlx5dr_rule *rule, info.miss_icm_addr = nic_matcher->e_anchor->chunk->icm_addr; mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi, - nic_dmn, + nic_dmn->type, new_htbl, formatted_ste, &info); @@ -581,34 +583,66 @@ free_action_members: return -ENOMEM; } -/* While the pointer of ste is no longer valid, like while moving ste to be - * the first in the miss_list, and to be in the origin table, - * all rule-members that are attached to this ste should update their ste member - * to the new pointer - */ -void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *ste, - struct mlx5dr_ste *new_ste) +void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste, + bool force) { - struct mlx5dr_rule_member *rule_mem; + /* Update rule member is usually done for the last STE or during rule + * creation to recover from mid-creation failure (for this peruse the + * force flag is used) + */ + if (ste->next_htbl && !force) + return; - list_for_each_entry(rule_mem, &ste->rule_list, 
use_ste_list) - rule_mem->ste = new_ste; + /* Update is required since each rule keeps track of its last STE */ + ste->rule_rx_tx = nic_rule; + nic_rule->last_rule_ste = ste; +} + +static struct mlx5dr_ste *dr_rule_get_pointed_ste(struct mlx5dr_ste *curr_ste) +{ + struct mlx5dr_ste *first_ste; + + first_ste = list_first_entry(mlx5dr_ste_get_miss_list(curr_ste), + struct mlx5dr_ste, miss_list_node); + + return first_ste->htbl->pointing_ste; +} + +int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr, + struct mlx5dr_ste *curr_ste, + int *num_of_stes) +{ + bool first = false; + + *num_of_stes = 0; + + if (!curr_ste) + return -ENOENT; + + /* Iterate from last to first */ + while (!first) { + first = curr_ste->ste_chain_location == 1; + ste_arr[*num_of_stes] = curr_ste; + *num_of_stes += 1; + curr_ste = dr_rule_get_pointed_ste(curr_ste); + } + + return 0; } static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule) { - struct mlx5dr_rule_member *rule_mem; - struct mlx5dr_rule_member *tmp_mem; + struct mlx5dr_ste *ste_arr[DR_RULE_MAX_STES + DR_ACTION_MAX_STES]; + struct mlx5dr_ste *curr_ste = nic_rule->last_rule_ste; + int i; - if (list_empty(&nic_rule->rule_members_list)) + if (mlx5dr_rule_get_reverse_rule_members(ste_arr, curr_ste, &i)) return; - list_for_each_entry_safe(rule_mem, tmp_mem, &nic_rule->rule_members_list, list) { - list_del(&rule_mem->list); - list_del(&rule_mem->use_ste_list); - mlx5dr_ste_put(rule_mem->ste, rule->matcher, nic_rule->nic_matcher); - kvfree(rule_mem); - } + + while (i--) + mlx5dr_ste_put(ste_arr[i], rule->matcher, nic_rule->nic_matcher); } static u16 dr_get_bits_per_mask(u16 byte_mask) @@ -628,43 +662,25 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl, struct mlx5dr_domain_rx_tx *nic_dmn) { struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; + int threshold; if (dmn->info.max_log_sw_icm_sz <= htbl->chunk_size) return false; - if (!ctrl->may_grow) + if (!mlx5dr_ste_htbl_may_grow(htbl)) return false; if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size) return false; - if (ctrl->num_of_collisions >= ctrl->increase_threshold && - (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold) + threshold = mlx5dr_ste_htbl_increase_threshold(htbl); + if (ctrl->num_of_collisions >= threshold && + (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= threshold) return true; return false; } -static int dr_rule_add_member(struct mlx5dr_rule_rx_tx *nic_rule, - struct mlx5dr_ste *ste) -{ - struct mlx5dr_rule_member *rule_mem; - - rule_mem = kvzalloc(sizeof(*rule_mem), GFP_KERNEL); - if (!rule_mem) - return -ENOMEM; - - INIT_LIST_HEAD(&rule_mem->list); - INIT_LIST_HEAD(&rule_mem->use_ste_list); - - rule_mem->ste = ste; - list_add_tail(&rule_mem->list, &nic_rule->rule_members_list); - - list_add_tail(&rule_mem->use_ste_list, &ste->rule_list); - - return 0; -} - static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_rule_rx_tx *nic_rule, struct list_head *send_ste_list, @@ -679,15 +695,13 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, struct mlx5dr_domain *dmn = matcher->tbl->dmn; u8 *curr_hw_ste, *prev_hw_ste; struct mlx5dr_ste *action_ste; - int i, k, ret; + int i, k; /* Two cases: * 1. num_of_builders is equal to new_hw_ste_arr_sz, the action in the ste * 2. num_of_builders is less then new_hw_ste_arr_sz, new ste was added * to support the action. 
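The dr_rule.c changes above drop the per-rule rule_members_list: each nic_rule now records only its last STE via mlx5dr_rule_set_last_member(), and mlx5dr_rule_get_reverse_rule_members() rebuilds the member array on demand by walking backwards through htbl->pointing_ste until ste_chain_location reaches 1. The sketch below illustrates only that backward walk with simplified stand-in structures; it omits the miss-list lookup the driver performs first and is not the real API.

#include <stdio.h>

#define MAX_STES 8

struct htbl;

struct ste {
	struct htbl *htbl;		/* table this STE lives in */
	int ste_chain_location;		/* 1 for the rule's first STE */
};

struct htbl {
	struct ste *pointing_ste;	/* STE in the previous table that points here */
};

/* Collect a rule's STEs from last to first by following pointing_ste. */
static int get_reverse_members(struct ste **arr, struct ste *curr, int *n)
{
	*n = 0;
	if (!curr)
		return -1;

	for (;;) {
		arr[(*n)++] = curr;
		if (curr->ste_chain_location == 1)
			return 0;
		curr = curr->htbl->pointing_ste;
	}
}

int main(void)
{
	struct htbl h1 = { 0 }, h2 = { 0 }, h3 = { 0 };
	struct ste s1 = { &h1, 1 }, s2 = { &h2, 2 }, s3 = { &h3, 3 };
	struct ste *arr[MAX_STES];
	int n, i;

	h2.pointing_ste = &s1;	/* s1 is the STE that points at s2's table */
	h3.pointing_ste = &s2;	/* s2 is the STE that points at s3's table */

	if (!get_reverse_members(arr, &s3, &n))
		for (i = 0; i < n; i++)
			printf("%d\n", arr[i]->ste_chain_location);	/* 3 2 1 */
	return 0;
}

The payoff is visible in dr_rule_clean_rule_members() above: rule teardown no longer allocates or frees struct mlx5dr_rule_member entries, it just walks the chain back from the last STE.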
*/ - if (num_of_builders == new_hw_ste_arr_sz) - return 0; for (i = num_of_builders, k = 0; i < new_hw_ste_arr_sz; i++, k++) { curr_hw_ste = hw_ste_arr + i * DR_STE_SIZE; @@ -700,6 +714,10 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, mlx5dr_ste_get(action_ste); + action_ste->htbl->pointing_ste = last_ste; + last_ste->next_htbl = action_ste->htbl; + last_ste = action_ste; + /* While free ste we go over the miss list, so add this ste to the list */ list_add_tail(&action_ste->miss_list_node, mlx5dr_ste_get_miss_list(action_ste)); @@ -713,21 +731,19 @@ static int dr_rule_handle_action_stes(struct mlx5dr_rule *rule, mlx5dr_ste_set_hit_addr_by_next_htbl(dmn->ste_ctx, prev_hw_ste, action_ste->htbl); - ret = dr_rule_add_member(nic_rule, action_ste); - if (ret) { - mlx5dr_dbg(dmn, "Failed adding rule member\n"); - goto free_ste_info; - } + + mlx5dr_rule_set_last_member(nic_rule, action_ste, true); + mlx5dr_send_fill_and_append_ste_send_info(action_ste, DR_STE_SIZE, 0, curr_hw_ste, ste_info_arr[k], send_ste_list, false); } + last_ste->next_htbl = NULL; + return 0; -free_ste_info: - kfree(ste_info_arr[k]); err_exit: mlx5dr_ste_put(action_ste, matcher, nic_matcher); return -ENOMEM; @@ -846,9 +862,9 @@ again: new_htbl = dr_rule_rehash(rule, nic_rule, cur_htbl, ste_location, send_ste_list); if (!new_htbl) { - mlx5dr_htbl_put(cur_htbl); mlx5dr_err(dmn, "Failed creating rehash table, htbl-log_size: %d\n", cur_htbl->chunk_size); + mlx5dr_htbl_put(cur_htbl); } else { cur_htbl = new_htbl; } @@ -1015,12 +1031,12 @@ static enum mlx5dr_ipv dr_rule_get_ipv(struct mlx5dr_match_spec *spec) } static bool dr_rule_skip(enum mlx5dr_domain_type domain, - enum mlx5dr_ste_entry_type ste_type, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_match_param *mask, struct mlx5dr_match_param *value, u32 flow_source) { - bool rx = ste_type == MLX5DR_STE_TYPE_RX; + bool rx = nic_type == DR_DOMAIN_NIC_TYPE_RX; if (domain != MLX5DR_DOMAIN_TYPE_FDB) return false; @@ -1065,9 +1081,7 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, nic_matcher = nic_rule->nic_matcher; nic_dmn = nic_matcher->nic_tbl->nic_dmn; - INIT_LIST_HEAD(&nic_rule->rule_members_list); - - if (dr_rule_skip(dmn->type, nic_dmn->ste_type, &matcher->mask, param, + if (dr_rule_skip(dmn->type, nic_dmn->type, &matcher->mask, param, rule->flow_source)) return 0; @@ -1121,14 +1135,8 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, cur_htbl = ste->next_htbl; - /* Keep all STEs in the rule struct */ - ret = dr_rule_add_member(nic_rule, ste); - if (ret) { - mlx5dr_dbg(dmn, "Failed adding rule member index %d\n", i); - goto free_ste; - } - mlx5dr_ste_get(ste); + mlx5dr_rule_set_last_member(nic_rule, ste, true); } /* Connect actions */ @@ -1153,8 +1161,6 @@ dr_rule_create_rule_nic(struct mlx5dr_rule *rule, return 0; -free_ste: - mlx5dr_ste_put(ste, matcher, nic_matcher); free_rule: dr_rule_clean_rule_members(rule, nic_rule); /* Clean all ste_info's */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c index 9df0e73d1c35..bfb14b4b1906 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_send.c @@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn, do { ne = dr_poll_cq(send_ring->cq, 1); - if (ne < 0) + if (unlikely(ne < 0)) { + mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited", + send_ring->qp->qpn); + send_ring->err_state = true; return ne; - 
else if (ne == 1) + } else if (ne == 1) { send_ring->pending_wqe -= send_ring->signal_th; + } } while (is_drain && send_ring->pending_wqe); return 0; @@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn, u32 buff_offset; int ret; + if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR || + send_ring->err_state)) { + mlx5_core_dbg_once(dmn->mdev, + "Skipping post send: QP err state: %d, device state: %d\n", + send_ring->err_state, dmn->mdev->state); + return 0; + } + spin_lock(&send_ring->lock); ret = dr_handle_pending_wc(dmn, send_ring); @@ -620,6 +632,7 @@ static int dr_cmd_modify_qp_rtr2rts(struct mlx5_core_dev *mdev, MLX5_SET(qpc, qpc, retry_count, attr->retry_cnt); MLX5_SET(qpc, qpc, rnr_retry, attr->rnr_retry); + MLX5_SET(qpc, qpc, primary_address_path.ack_timeout, 0x8); /* ~1ms */ MLX5_SET(rtr2rts_qp_in, in, opcode, MLX5_CMD_OP_RTR2RTS_QP); MLX5_SET(rtr2rts_qp_in, in, qpn, dr_qp->qpn); @@ -789,7 +802,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev, cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context); MLX5_SET(cqc, cqc, log_cq_size, ilog2(ncqe)); - MLX5_SET(cqc, cqc, c_eqn, eqn); + MLX5_SET(cqc, cqc, c_eqn_or_apu_element, eqn); MLX5_SET(cqc, cqc, uar_page, uar->index); MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c index 9b1529137cba..1cdfe4fccc7a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c @@ -172,9 +172,6 @@ static void dr_ste_replace(struct mlx5dr_ste *dst, struct mlx5dr_ste *src) dst->next_htbl->pointing_ste = dst; dst->refcount = src->refcount; - - INIT_LIST_HEAD(&dst->rule_list); - list_splice_tail_init(&src->rule_list, &dst->rule_list); } /* Free ste which is the head and the only one in miss_list */ @@ -233,12 +230,12 @@ dr_ste_replace_head_ste(struct mlx5dr_matcher_rx_tx *nic_matcher, /* Remove from the miss_list the next_ste before copy */ list_del_init(&next_ste->miss_list_node); - /* All rule-members that use next_ste should know about that */ - mlx5dr_rule_update_rule_member(next_ste, ste); - /* Move data from next into ste */ dr_ste_replace(ste, next_ste); + /* Update the rule on STE change */ + mlx5dr_rule_set_last_member(next_ste->rule_rx_tx, ste, false); + /* Copy all 64 hw_ste bytes */ memcpy(hw_ste, ste->hw_ste, DR_STE_SIZE_REDUCED); sb_idx = ste->ste_chain_location - 1; @@ -382,14 +379,15 @@ void mlx5dr_ste_prepare_for_postsend(struct mlx5dr_ste_ctx *ste_ctx, /* Init one ste as a pattern for ste data array */ void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, u16 gvmi, - struct mlx5dr_domain_rx_tx *nic_dmn, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, struct mlx5dr_htbl_connect_info *connect_info) { + bool is_rx = nic_type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_ste ste = {}; - ste_ctx->ste_init(formatted_ste, htbl->lu_type, nic_dmn->ste_type, gvmi); + ste_ctx->ste_init(formatted_ste, htbl->lu_type, is_rx, gvmi); ste.hw_ste = formatted_ste; if (connect_info->type == CONNECT_HIT) @@ -408,7 +406,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, mlx5dr_ste_set_formatted_ste(dmn->ste_ctx, dmn->info.caps.gvmi, - nic_dmn, + nic_dmn->type, htbl, formatted_ste, connect_info); @@ -466,21 +464,6 @@ free_table: return -ENOENT; } -static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl 
*htbl) -{ - struct mlx5dr_ste_htbl_ctrl *ctrl = &htbl->ctrl; - int num_of_entries; - - htbl->ctrl.may_grow = true; - - if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) - htbl->ctrl.may_grow = false; - - /* Threshold is 50%, one is added to table of size 1 */ - num_of_entries = mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); - ctrl->increase_threshold = (num_of_entries + 1) / 2; -} - struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, enum mlx5dr_icm_chunk_size chunk_size, u16 lu_type, u16 byte_mask) @@ -513,11 +496,9 @@ struct mlx5dr_ste_htbl *mlx5dr_ste_htbl_alloc(struct mlx5dr_icm_pool *pool, ste->refcount = 0; INIT_LIST_HEAD(&ste->miss_list_node); INIT_LIST_HEAD(&htbl->miss_list[i]); - INIT_LIST_HEAD(&ste->rule_list); } htbl->chunk_size = chunk_size; - dr_ste_set_ctrl(htbl); return htbl; out_free_htbl: @@ -649,6 +630,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, u8 *ste_arr) { struct mlx5dr_domain_rx_tx *nic_dmn = nic_matcher->nic_tbl->nic_dmn; + bool is_rx = nic_dmn->type == DR_DOMAIN_NIC_TYPE_RX; struct mlx5dr_domain *dmn = matcher->tbl->dmn; struct mlx5dr_ste_ctx *ste_ctx = dmn->ste_ctx; struct mlx5dr_ste_build *sb; @@ -663,7 +645,7 @@ int mlx5dr_ste_build_ste_arr(struct mlx5dr_matcher *matcher, for (i = 0; i < nic_matcher->num_of_builders; i++) { ste_ctx->ste_init(ste_arr, sb->lu_type, - nic_dmn->ste_type, + is_rx, dmn->info.caps.gvmi); mlx5dr_ste_set_bit_mask(ste_arr, sb->bit_mask); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h index 12a8bbbf944b..2d52d065dc8b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.h @@ -146,7 +146,7 @@ struct mlx5dr_ste_ctx { /* Getters and Setters */ void (*ste_init)(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi); + bool is_rx, u16 gvmi); void (*set_next_lu_type)(u8 *hw_ste_p, u16 lu_type); u16 (*get_next_lu_type)(u8 *hw_ste_p); void (*set_miss_addr)(u8 *hw_ste_p, u64 miss_addr); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c index e4dd4eed5aee..9c704bce3c12 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v0.c @@ -8,6 +8,12 @@ #define SVLAN_ETHERTYPE 0x88a8 #define DR_STE_ENABLE_FLOW_TAG BIT(31) +enum dr_ste_v0_entry_type { + DR_STE_TYPE_TX = 1, + DR_STE_TYPE_RX = 2, + DR_STE_TYPE_MODIFY_PKT = 6, +}; + enum dr_ste_v0_action_tunl { DR_STE_TUNL_ACTION_NONE = 0, DR_STE_TUNL_ACTION_ENABLE = 1, @@ -292,8 +298,8 @@ static void dr_ste_v0_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) MLX5_SET(ste_general, hw_ste_p, next_table_base_31_5_size, index); } -static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi) +static void dr_ste_v0_init_full(u8 *hw_ste_p, u16 lu_type, + enum dr_ste_v0_entry_type entry_type, u16 gvmi) { dr_ste_v0_set_entry_type(hw_ste_p, entry_type); dr_ste_v0_set_lu_type(hw_ste_p, lu_type); @@ -307,6 +313,15 @@ static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, MLX5_SET(ste_rx_steering_mult, hw_ste_p, miss_address_63_48, gvmi); } +static void dr_ste_v0_init(u8 *hw_ste_p, u16 lu_type, + bool is_rx, u16 gvmi) +{ + enum dr_ste_v0_entry_type entry_type; + + entry_type = is_rx ? 
DR_STE_TYPE_RX : DR_STE_TYPE_TX; + dr_ste_v0_init_full(hw_ste_p, lu_type, entry_type, gvmi); +} + static void dr_ste_v0_rx_set_flow_tag(u8 *hw_ste_p, u32 flow_tag) { MLX5_SET(ste_rx_steering_mult, hw_ste_p, qp_list_pointer, @@ -380,13 +395,13 @@ static void dr_ste_v0_set_rewrite_actions(u8 *hw_ste_p, u16 num_of_actions, static void dr_ste_v0_arr_init_next(u8 **last_ste, u32 *added_stes, - enum mlx5dr_ste_entry_type entry_type, + enum dr_ste_v0_entry_type entry_type, u16 gvmi) { (*added_stes)++; *last_ste += DR_STE_SIZE; - dr_ste_v0_init(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, - entry_type, gvmi); + dr_ste_v0_init_full(*last_ste, MLX5DR_STE_LU_TYPE_DONT_CARE, + entry_type, gvmi); } static void @@ -404,7 +419,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, * modify headers for outer headers only */ if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rewrite_actions(last_ste, attr->modify_actions, attr->modify_index); @@ -417,7 +432,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, if (i || action_type_set[DR_ACTION_TYP_MODIFY_HDR]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_TX, + DR_STE_TYPE_TX, attr->gvmi); dr_ste_v0_set_tx_push_vlan(last_ste, @@ -435,7 +450,7 @@ dr_ste_v0_set_actions_tx(struct mlx5dr_domain *dmn, action_type_set[DR_ACTION_TYP_PUSH_VLAN]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_TX, + DR_STE_TYPE_TX, attr->gvmi); dr_ste_v0_set_tx_encap(last_ste, @@ -469,7 +484,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, dr_ste_v0_set_counter_id(last_ste, attr->ctr_id); if (action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) { - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rx_decap_l3(last_ste, attr->decap_with_vlan); dr_ste_v0_set_rewrite_actions(last_ste, attr->decap_actions, @@ -488,7 +503,7 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, action_type_set[DR_ACTION_TYP_TNL_L3_TO_L2]) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_RX, + DR_STE_TYPE_RX, attr->gvmi); dr_ste_v0_set_rx_pop_vlan(last_ste); @@ -496,13 +511,13 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, } if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_MODIFY_PKT, + DR_STE_TYPE_MODIFY_PKT, attr->gvmi); else - dr_ste_v0_set_entry_type(last_ste, MLX5DR_STE_TYPE_MODIFY_PKT); + dr_ste_v0_set_entry_type(last_ste, DR_STE_TYPE_MODIFY_PKT); dr_ste_v0_set_rewrite_actions(last_ste, attr->modify_actions, @@ -510,10 +525,10 @@ dr_ste_v0_set_actions_rx(struct mlx5dr_domain *dmn, } if (action_type_set[DR_ACTION_TYP_TAG]) { - if (dr_ste_v0_get_entry_type(last_ste) == MLX5DR_STE_TYPE_MODIFY_PKT) + if (dr_ste_v0_get_entry_type(last_ste) == DR_STE_TYPE_MODIFY_PKT) dr_ste_v0_arr_init_next(&last_ste, added_stes, - MLX5DR_STE_TYPE_RX, + DR_STE_TYPE_RX, attr->gvmi); dr_ste_v0_rx_set_flow_tag(last_ste, attr->flow_tag); @@ -1157,6 +1172,7 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, u8 *tag) { struct mlx5dr_match_spec *spec = sb->inner ? 
&value->inner : &value->outer; + struct mlx5dr_match_misc *misc = &value->misc; DR_STE_SET_TAG(eth_l4, tag, dst_port, spec, tcp_dport); DR_STE_SET_TAG(eth_l4, tag, src_port, spec, tcp_sport); @@ -1168,6 +1184,11 @@ dr_ste_v0_build_eth_ipv6_l3_l4_tag(struct mlx5dr_match_param *value, DR_STE_SET_TAG(eth_l4, tag, ecn, spec, ip_ecn); DR_STE_SET_TAG(eth_l4, tag, ipv6_hop_limit, spec, ttl_hoplimit); + if (sb->inner) + DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, inner_ipv6_flow_label); + else + DR_STE_SET_TAG(eth_l4, tag, flow_label, misc, outer_ipv6_flow_label); + if (spec->tcp_flags) { DR_STE_SET_TCP_FLAGS(eth_l4, tag, spec); spec->tcp_flags = 0; @@ -1772,7 +1793,7 @@ dr_ste_v0_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc3 *misc3 = &value->misc3; @@ -1802,7 +1823,7 @@ static void dr_ste_v0_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s static int dr_ste_v0_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1829,7 +1850,7 @@ dr_ste_v0_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, static int dr_ste_v0_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c index 4aaca8eb7597..b2481c99da79 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste_v1.c @@ -322,7 +322,7 @@ static void dr_ste_v1_set_hit_addr(u8 *hw_ste_p, u64 icm_addr, u32 ht_size) } static void dr_ste_v1_init(u8 *hw_ste_p, u16 lu_type, - u8 entry_type, u16 gvmi) + bool is_rx, u16 gvmi) { dr_ste_v1_set_lu_type(hw_ste_p, lu_type); dr_ste_v1_set_next_lu_type(hw_ste_p, MLX5DR_STE_LU_TYPE_DONT_CARE); @@ -402,8 +402,23 @@ static void dr_ste_v1_set_insert_hdr(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, - u32 vlan_hdr) +static void dr_ste_v1_set_remove_hdr(u8 *hw_ste_p, u8 *s_action, + u8 anchor, u8 offset, + int size) +{ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, + action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_anchor, anchor); + + /* The hardware expects here size and offset in words (2 byte) */ + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, remove_size, size / 2); + MLX5_SET(ste_single_action_remove_header_size_v1, s_action, start_offset, offset / 2); + + dr_ste_v1_set_reparse(hw_ste_p); +} + +static void dr_ste_v1_set_push_vlan(u8 *hw_ste_p, u8 *d_action, + u32 vlan_hdr) { MLX5_SET(ste_double_action_insert_with_inline_v1, d_action, action_id, DR_STE_V1_ACTION_ID_INSERT_INLINE); @@ -416,7 +431,7 @@ static void dr_ste_v1_set_tx_push_vlan(u8 *hw_ste_p, u8 *d_action, dr_ste_v1_set_reparse(hw_ste_p); } -static void dr_ste_v1_set_rx_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) +static void 
dr_ste_v1_set_pop_vlan(u8 *hw_ste_p, u8 *s_action, u8 vlans_num) { MLX5_SET(ste_single_action_remove_header_size_v1, s_action, action_id, DR_STE_V1_ACTION_ID_REMOVE_BY_SIZE); @@ -503,13 +518,28 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, { u8 *action = MLX5_ADDR_OF(ste_match_bwc_v1, last_ste, action); u8 action_sz = DR_STE_ACTION_DOUBLE_SZ; + bool allow_modify_hdr = true; bool allow_encap = true; + if (action_type_set[DR_ACTION_TYP_POP_VLAN]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; + allow_modify_hdr = false; + } + if (action_type_set[DR_ACTION_TYP_CTR]) dr_ste_v1_set_counter_id(last_ste, attr->ctr_id); if (action_type_set[DR_ACTION_TYP_MODIFY_HDR]) { - if (action_sz < DR_STE_ACTION_DOUBLE_SZ) { + if (!allow_modify_hdr || action_sz < DR_STE_ACTION_DOUBLE_SZ) { dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); action = MLX5_ADDR_OF(ste_mask_and_match_v1, @@ -534,7 +564,8 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, action_sz = DR_STE_ACTION_TRIPLE_SZ; allow_encap = true; } - dr_ste_v1_set_tx_push_vlan(last_ste, action, attr->vlans.headers[i]); + dr_ste_v1_set_push_vlan(last_ste, action, + attr->vlans.headers[i]); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; } @@ -579,6 +610,18 @@ static void dr_ste_v1_set_actions_tx(struct mlx5dr_domain *dmn, attr->reformat.size); action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; + } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; } dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); @@ -635,7 +678,7 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, allow_ctr = false; } - dr_ste_v1_set_rx_pop_vlan(last_ste, action, attr->vlans.count); + dr_ste_v1_set_pop_vlan(last_ste, action, attr->vlans.count); action_sz -= DR_STE_ACTION_SINGLE_SZ; action += DR_STE_ACTION_SINGLE_SZ; } @@ -656,6 +699,26 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action += DR_STE_ACTION_DOUBLE_SZ; } + if (action_type_set[DR_ACTION_TYP_PUSH_VLAN]) { + int i; + + for (i = 0; i < attr->vlans.count; i++) { + if (action_sz < DR_STE_ACTION_DOUBLE_SZ || + !allow_modify_hdr) { + dr_ste_v1_arr_init_next_match(&last_ste, + added_stes, + attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, + last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + } + dr_ste_v1_set_push_vlan(last_ste, action, + attr->vlans.headers[i]); + action_sz -= DR_STE_ACTION_DOUBLE_SZ; + action += DR_STE_ACTION_DOUBLE_SZ; + } + } + if (action_type_set[DR_ACTION_TYP_CTR]) { /* Counter action set after decap and before insert_hdr * to exclude decaped / encaped header respectively. 
@@ -714,6 +777,20 @@ static void dr_ste_v1_set_actions_rx(struct mlx5dr_domain *dmn, action_sz -= DR_STE_ACTION_DOUBLE_SZ; action += DR_STE_ACTION_DOUBLE_SZ; allow_modify_hdr = false; + } else if (action_type_set[DR_ACTION_TYP_REMOVE_HDR]) { + if (action_sz < DR_STE_ACTION_SINGLE_SZ) { + dr_ste_v1_arr_init_next_match(&last_ste, added_stes, attr->gvmi); + action = MLX5_ADDR_OF(ste_mask_and_match_v1, last_ste, action); + action_sz = DR_STE_ACTION_TRIPLE_SZ; + allow_modify_hdr = true; + allow_ctr = true; + } + dr_ste_v1_set_remove_hdr(last_ste, action, + attr->reformat.param_0, + attr->reformat.param_1, + attr->reformat.size); + action_sz -= DR_STE_ACTION_SINGLE_SZ; + action += DR_STE_ACTION_SINGLE_SZ; } dr_ste_v1_set_hit_gvmi(last_ste, attr->hit_gvmi); @@ -1844,7 +1921,7 @@ dr_ste_v1_build_flex_parser_tnl_geneve_tlv_opt_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_flex_parser_tnl_gtpu_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { struct mlx5dr_match_misc3 *misc3 = &value->misc3; @@ -1868,7 +1945,7 @@ static void dr_ste_v1_build_flex_parser_tnl_gtpu_init(struct mlx5dr_ste_build *s static int dr_ste_v1_build_tnl_gtpu_flex_parser_0_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_0_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1895,7 +1972,7 @@ dr_ste_v1_build_tnl_gtpu_flex_parser_0_init(struct mlx5dr_ste_build *sb, static int dr_ste_v1_build_tnl_gtpu_flex_parser_1_tag(struct mlx5dr_match_param *value, struct mlx5dr_ste_build *sb, - uint8_t *tag) + u8 *tag) { if (dr_is_flex_parser_1_id(sb->caps->flex_parser_id_gtpu_dw_0)) DR_STE_SET_FLEX_PARSER_FIELD(tag, gtpu_dw_0, sb->caps, &value->misc3); @@ -1960,7 +2037,9 @@ struct mlx5dr_ste_ctx ste_ctx_v1 = { .set_byte_mask = &dr_ste_v1_set_byte_mask, .get_byte_mask = &dr_ste_v1_get_byte_mask, /* Actions */ - .actions_caps = DR_STE_CTX_ACTION_CAP_RX_ENCAP, + .actions_caps = DR_STE_CTX_ACTION_CAP_TX_POP | + DR_STE_CTX_ACTION_CAP_RX_PUSH | + DR_STE_CTX_ACTION_CAP_RX_ENCAP, .set_actions_rx = &dr_ste_v1_set_actions_rx, .set_actions_tx = &dr_ste_v1_set_actions_tx, .modify_field_arr_sz = ARRAY_SIZE(dr_ste_v1_action_modify_field_arr), diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h index f5e93fa87aff..b20e8aabb861 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_types.h @@ -83,15 +83,14 @@ enum { DR_STE_SIZE_CTRL = 32, DR_STE_SIZE_TAG = 16, DR_STE_SIZE_MASK = 16, -}; - -enum { DR_STE_SIZE_REDUCED = DR_STE_SIZE - DR_STE_SIZE_MASK, }; enum mlx5dr_ste_ctx_action_cap { DR_STE_CTX_ACTION_CAP_NONE = 0, - DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 0, + DR_STE_CTX_ACTION_CAP_TX_POP = 1 << 0, + DR_STE_CTX_ACTION_CAP_RX_PUSH = 1 << 1, + DR_STE_CTX_ACTION_CAP_RX_ENCAP = 1 << 2, }; enum { @@ -124,6 +123,7 @@ enum mlx5dr_action_type { DR_ACTION_TYP_POP_VLAN, DR_ACTION_TYP_PUSH_VLAN, DR_ACTION_TYP_INSERT_HDR, + DR_ACTION_TYP_REMOVE_HDR, DR_ACTION_TYP_SAMPLER, DR_ACTION_TYP_MAX, }; @@ -140,6 +140,7 @@ struct mlx5dr_icm_buddy_mem; struct mlx5dr_ste_htbl; struct mlx5dr_match_param; struct mlx5dr_cmd_caps; +struct mlx5dr_rule_rx_tx; struct mlx5dr_matcher_rx_tx; struct mlx5dr_ste_ctx; @@ -151,14 +152,14 @@ struct mlx5dr_ste { /* attached to the miss_list head at each htbl entry */ struct list_head miss_list_node; - /* 
each rule member that uses this ste attached here */ - struct list_head rule_list; - /* this ste is member of htbl */ struct mlx5dr_ste_htbl *htbl; struct mlx5dr_ste_htbl *next_htbl; + /* The rule this STE belongs to */ + struct mlx5dr_rule_rx_tx *rule_rx_tx; + /* this ste is part of a rule, located in ste's chain */ u8 ste_chain_location; }; @@ -171,8 +172,6 @@ struct mlx5dr_ste_htbl_ctrl { /* total number of collisions entries attached to this table */ unsigned int num_of_collisions; - unsigned int increase_threshold; - u8 may_grow:1; }; struct mlx5dr_ste_htbl { @@ -804,10 +803,15 @@ struct mlx5dr_cmd_caps { u8 isolate_vl_tc:1; }; +enum mlx5dr_domain_nic_type { + DR_DOMAIN_NIC_TYPE_RX, + DR_DOMAIN_NIC_TYPE_TX, +}; + struct mlx5dr_domain_rx_tx { u64 drop_icm_addr; u64 default_icm_addr; - enum mlx5dr_ste_entry_type ste_type; + enum mlx5dr_domain_nic_type type; struct mutex mutex; /* protect rx/tx domain */ }; @@ -885,14 +889,6 @@ struct mlx5dr_matcher { struct mlx5dv_flow_matcher *dv_matcher; }; -struct mlx5dr_rule_member { - struct mlx5dr_ste *ste; - /* attached to mlx5dr_rule via this */ - struct list_head list; - /* attached to mlx5dr_ste via this */ - struct list_head use_ste_list; -}; - struct mlx5dr_ste_action_modify_field { u16 hw_field; u8 start; @@ -993,8 +989,8 @@ struct mlx5dr_htbl_connect_info { }; struct mlx5dr_rule_rx_tx { - struct list_head rule_members_list; struct mlx5dr_matcher_rx_tx *nic_matcher; + struct mlx5dr_ste *last_rule_ste; }; struct mlx5dr_rule { @@ -1005,8 +1001,12 @@ struct mlx5dr_rule { u32 flow_source; }; -void mlx5dr_rule_update_rule_member(struct mlx5dr_ste *new_ste, - struct mlx5dr_ste *ste); +void mlx5dr_rule_set_last_member(struct mlx5dr_rule_rx_tx *nic_rule, + struct mlx5dr_ste *ste, + bool force); +int mlx5dr_rule_get_reverse_rule_members(struct mlx5dr_ste **ste_arr, + struct mlx5dr_ste *curr_ste, + int *num_of_stes); struct mlx5dr_icm_chunk { struct mlx5dr_icm_buddy_mem *buddy_mem; @@ -1083,6 +1083,25 @@ mlx5dr_icm_pool_chunk_size_to_byte(enum mlx5dr_icm_chunk_size chunk_size, return entry_size * num_of_entries; } +static inline int +mlx5dr_ste_htbl_increase_threshold(struct mlx5dr_ste_htbl *htbl) +{ + int num_of_entries = + mlx5dr_icm_pool_chunk_size_to_entries(htbl->chunk_size); + + /* Threshold is 50%, one is added to table of size 1 */ + return (num_of_entries + 1) / 2; +} + +static inline bool +mlx5dr_ste_htbl_may_grow(struct mlx5dr_ste_htbl *htbl) +{ + if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask) + return false; + + return true; +} + static inline struct mlx5dr_cmd_vport_cap * mlx5dr_get_vport_cap(struct mlx5dr_cmd_caps *caps, u32 vport) { @@ -1216,7 +1235,7 @@ int mlx5dr_ste_htbl_init_and_postsend(struct mlx5dr_domain *dmn, bool update_hw_ste); void mlx5dr_ste_set_formatted_ste(struct mlx5dr_ste_ctx *ste_ctx, u16 gvmi, - struct mlx5dr_domain_rx_tx *nic_dmn, + enum mlx5dr_domain_nic_type nic_type, struct mlx5dr_ste_htbl *htbl, u8 *formatted_ste, struct mlx5dr_htbl_connect_info *connect_info); @@ -1282,6 +1301,7 @@ struct mlx5dr_send_ring { u8 sync_buff[MIN_READ_SYNC]; struct mlx5dr_mr *sync_mr; spinlock_t lock; /* Protect the data path of the send ring */ + bool err_state; /* send_ring is not usable in err state */ }; int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn); @@ -1333,6 +1353,7 @@ struct mlx5dr_cmd_fte_info { u32 *val; struct mlx5_flow_act action; struct mlx5dr_cmd_flow_destination_hw_info *dest_arr; + bool ignore_flow_level; }; int mlx5dr_cmd_set_fte(struct mlx5_core_dev *dev, @@ -1362,7 +1383,8 @@ int 
mlx5dr_fw_create_md_tbl(struct mlx5dr_domain *dmn, int num_dest, bool reformat_req, u32 *tbl_id, - u32 *group_id); + u32 *group_id, + bool ignore_flow_level); void mlx5dr_fw_destroy_md_tbl(struct mlx5dr_domain *dmn, u32 tbl_id, u32 group_id); #endif /* _DR_TYPES_H_ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c index d5926dd7e972..7e58f4e594b7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c @@ -133,6 +133,9 @@ static int mlx5_cmd_dr_modify_flow_table(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct mlx5_flow_table *next_ft) { + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->modify_flow_table(ns, ft, next_ft); + return set_miss_action(ns, ft, next_ft); } @@ -487,9 +490,13 @@ static int mlx5_cmd_dr_create_fte(struct mlx5_flow_root_namespace *ns, actions[num_actions++] = term_actions->dest; } else if (num_term_actions > 1) { + bool ignore_flow_level = + !!(fte->action.flags & FLOW_ACT_IGNORE_FLOW_LEVEL); + tmp_action = mlx5dr_action_create_mult_dest_tbl(domain, term_actions, - num_term_actions); + num_term_actions, + ignore_flow_level); if (!tmp_action) { err = -EOPNOTSUPP; goto free_actions; @@ -557,6 +564,9 @@ static int mlx5_cmd_dr_packet_reformat_alloc(struct mlx5_flow_root_namespace *ns case MLX5_REFORMAT_TYPE_INSERT_HDR: dr_reformat = DR_ACTION_REFORMAT_TYP_INSERT_HDR; break; + case MLX5_REFORMAT_TYPE_REMOVE_HDR: + dr_reformat = DR_ACTION_REFORMAT_TYP_REMOVE_HDR; + break; default: mlx5_core_err(ns->dev, "Packet-reformat not supported(%d)\n", params->type); @@ -615,15 +625,6 @@ static void mlx5_cmd_dr_modify_header_dealloc(struct mlx5_flow_root_namespace *n mlx5dr_action_destroy(modify_hdr->action.dr_action); } -static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, - struct mlx5_flow_table *ft, - struct mlx5_flow_group *group, - int modify_mask, - struct fs_fte *fte) -{ - return -EOPNOTSUPP; -} - static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_table *ft, struct fs_fte *fte) @@ -648,6 +649,36 @@ static int mlx5_cmd_dr_delete_fte(struct mlx5_flow_root_namespace *ns, return 0; } +static int mlx5_cmd_dr_update_fte(struct mlx5_flow_root_namespace *ns, + struct mlx5_flow_table *ft, + struct mlx5_flow_group *group, + int modify_mask, + struct fs_fte *fte) +{ + struct fs_fte fte_tmp = {}; + int ret; + + if (mlx5_dr_is_fw_table(ft->flags)) + return mlx5_fs_cmd_get_fw_cmds()->update_fte(ns, ft, group, modify_mask, fte); + + /* Backup current dr rule details */ + fte_tmp.fs_dr_rule = fte->fs_dr_rule; + memset(&fte->fs_dr_rule, 0, sizeof(struct mlx5_fs_dr_rule)); + + /* First add the new updated rule, then delete the old rule */ + ret = mlx5_cmd_dr_create_fte(ns, ft, group, fte); + if (ret) + goto restore_fte; + + ret = mlx5_cmd_dr_delete_fte(ns, ft, &fte_tmp); + WARN_ONCE(ret, "dr update fte duplicate rule deletion failed\n"); + return ret; + +restore_fte: + fte->fs_dr_rule = fte_tmp.fs_dr_rule; + return ret; +} + static int mlx5_cmd_dr_set_peer(struct mlx5_flow_root_namespace *ns, struct mlx5_flow_root_namespace *peer_ns) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h index 9643ee647f57..d2a937f69784 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h +++ 
b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5_ifc_dr.h @@ -8,12 +8,6 @@ enum { MLX5DR_STE_LU_TYPE_DONT_CARE = 0x0f, }; -enum mlx5dr_ste_entry_type { - MLX5DR_STE_TYPE_TX = 1, - MLX5DR_STE_TYPE_RX = 2, - MLX5DR_STE_TYPE_MODIFY_PKT = 6, -}; - struct mlx5_ifc_ste_general_bits { u8 entry_type[0x4]; u8 reserved_at_4[0x4]; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h index bbfe101d4e57..c5a8b1601999 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/mlx5dr.h @@ -27,6 +27,7 @@ enum mlx5dr_action_reformat_type { DR_ACTION_REFORMAT_TYP_TNL_L3_TO_L2, DR_ACTION_REFORMAT_TYP_L2_TO_TNL_L3, DR_ACTION_REFORMAT_TYP_INSERT_HDR, + DR_ACTION_REFORMAT_TYP_REMOVE_HDR, }; struct mlx5dr_match_parameters { @@ -94,7 +95,8 @@ mlx5dr_action_create_dest_vport(struct mlx5dr_domain *domain, struct mlx5dr_action * mlx5dr_action_create_mult_dest_tbl(struct mlx5dr_domain *dmn, struct mlx5dr_action_dest *dests, - u32 num_of_dests); + u32 num_of_dests, + bool ignore_flow_level); struct mlx5dr_action *mlx5dr_action_create_drop(void); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c index a0a059e0154f..3e85b17f5857 100644 --- a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_main.c @@ -199,7 +199,7 @@ static int mlxbf_gige_stop(struct net_device *netdev) return 0; } -static int mlxbf_gige_do_ioctl(struct net_device *netdev, +static int mlxbf_gige_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { if (!(netif_running(netdev))) @@ -253,7 +253,7 @@ static const struct net_device_ops mlxbf_gige_netdev_ops = { .ndo_start_xmit = mlxbf_gige_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = mlxbf_gige_do_ioctl, + .ndo_eth_ioctl = mlxbf_gige_eth_ioctl, .ndo_set_rx_mode = mlxbf_gige_set_rx_mode, .ndo_get_stats64 = mlxbf_gige_get_stats64, }; @@ -269,9 +269,6 @@ static int mlxbf_gige_probe(struct platform_device *pdev) { struct phy_device *phydev; struct net_device *netdev; - struct resource *mac_res; - struct resource *llu_res; - struct resource *plu_res; struct mlxbf_gige *priv; void __iomem *llu_base; void __iomem *plu_base; @@ -280,27 +277,15 @@ static int mlxbf_gige_probe(struct platform_device *pdev) int addr; int err; - mac_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MAC); - if (!mac_res) - return -ENXIO; - - base = devm_ioremap_resource(&pdev->dev, mac_res); + base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MAC); if (IS_ERR(base)) return PTR_ERR(base); - llu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_LLU); - if (!llu_res) - return -ENXIO; - - llu_base = devm_ioremap_resource(&pdev->dev, llu_res); + llu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_LLU); if (IS_ERR(llu_base)) return PTR_ERR(llu_base); - plu_res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_PLU); - if (!plu_res) - return -ENXIO; - - plu_base = devm_ioremap_resource(&pdev->dev, plu_res); + plu_base = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_PLU); if (IS_ERR(plu_base)) return PTR_ERR(plu_base); diff --git a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c index e32dd34fdcc0..7905179a9575 100644 --- 
a/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c +++ b/drivers/net/ethernet/mellanox/mlxbf_gige/mlxbf_gige_mdio.c @@ -145,14 +145,9 @@ static int mlxbf_gige_mdio_write(struct mii_bus *bus, int phy_add, int mlxbf_gige_mdio_probe(struct platform_device *pdev, struct mlxbf_gige *priv) { struct device *dev = &pdev->dev; - struct resource *res; int ret; - res = platform_get_resource(pdev, IORESOURCE_MEM, MLXBF_GIGE_RES_MDIO9); - if (!res) - return -ENODEV; - - priv->mdio_io = devm_ioremap_resource(dev, res); + priv->mdio_io = devm_platform_ioremap_resource(pdev, MLXBF_GIGE_RES_MDIO9); if (IS_ERR(priv->mdio_io)) return PTR_ERR(priv->mdio_io); diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 12871c8dc7c1..d1ae248e125c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -58,10 +58,10 @@ config MLXSW_SPECTRUM depends on NET_IPGRE || NET_IPGRE=n depends on IPV6_GRE || IPV6_GRE=n depends on VXLAN || VXLAN=n + depends on PTP_1588_CLOCK_OPTIONAL select GENERIC_ALLOCATOR select PARMAN select OBJAGG - imply PTP_1588_CLOCK select NET_PTP_CLASSIFY if PTP_1588_CLOCK default m help diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index e775f08fb464..f080fab3de2b 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -1927,7 +1927,8 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, if (!reload) { alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size; - devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size); + devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size, + mlxsw_bus_info->dev); if (!devlink) { err = -ENOMEM; goto err_devlink_alloc; @@ -1974,7 +1975,7 @@ __mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_emad_init; if (!reload) { - err = devlink_register(devlink, mlxsw_bus_info->dev); + err = devlink_register(devlink); if (err) goto err_devlink_register; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index 88699e678544..250c5a24264d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -1207,7 +1207,7 @@ static const struct net_device_ops mlxsw_sp_port_netdev_ops = { .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid, .ndo_set_features = mlxsw_sp_set_features, .ndo_get_devlink_port = mlxsw_sp_port_get_devlink_port, - .ndo_do_ioctl = mlxsw_sp_port_ioctl, + .ndo_eth_ioctl = mlxsw_sp_port_ioctl, }; static int @@ -2717,6 +2717,22 @@ mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr); +#define MLXSW_SP_DEFAULT_PARSING_DEPTH 96 +#define MLXSW_SP_INCREASED_PARSING_DEPTH 128 +#define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789 + +static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) +{ + mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; + mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; + mutex_init(&mlxsw_sp->parsing.lock); +} + +static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) +{ + mutex_destroy(&mlxsw_sp->parsing.lock); +} + static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, const struct mlxsw_bus_info *mlxsw_bus_info, struct netlink_ext_ack *extack) @@ -2727,6 +2743,7 @@ static int mlxsw_sp_init(struct mlxsw_core 
*mlxsw_core, mlxsw_sp->core = mlxsw_core; mlxsw_sp->bus_info = mlxsw_bus_info; + mlxsw_sp_parsing_init(mlxsw_sp); mlxsw_core_emad_string_tlv_enable(mlxsw_core); err = mlxsw_sp_base_mac_get(mlxsw_sp); @@ -2926,6 +2943,7 @@ err_policers_init: mlxsw_sp_fids_fini(mlxsw_sp); err_fids_init: mlxsw_sp_kvdl_fini(mlxsw_sp); + mlxsw_sp_parsing_fini(mlxsw_sp); return err; } @@ -3046,6 +3064,7 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_policers_fini(mlxsw_sp); mlxsw_sp_fids_fini(mlxsw_sp); mlxsw_sp_kvdl_fini(mlxsw_sp); + mlxsw_sp_parsing_fini(mlxsw_sp); } /* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated @@ -3611,6 +3630,69 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) dev_put(mlxsw_sp_port->dev); } +int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + int err = 0; + + mutex_lock(&mlxsw_sp->parsing.lock); + + if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref)) + goto out_unlock; + + mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH, + mlxsw_sp->parsing.vxlan_udp_dport); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); + if (err) + goto out_unlock; + + mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH; + refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1); + +out_unlock: + mutex_unlock(&mlxsw_sp->parsing.lock); + return err; +} + +void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + + mutex_lock(&mlxsw_sp->parsing.lock); + + if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref)) + goto out_unlock; + + mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH, + mlxsw_sp->parsing.vxlan_udp_dport); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); + mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; + +out_unlock: + mutex_unlock(&mlxsw_sp->parsing.lock); +} + +int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, + __be16 udp_dport) +{ + char mprs_pl[MLXSW_REG_MPRS_LEN]; + int err; + + mutex_lock(&mlxsw_sp->parsing.lock); + + mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth, + be16_to_cpu(udp_dport)); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); + if (err) + goto out_unlock; + + mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport); + +out_unlock: + mutex_unlock(&mlxsw_sp->parsing.lock); + return err; +} + static void mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index f99db88ee884..3a43cba6d23c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -148,6 +148,13 @@ struct mlxsw_sp_port_mapping { u8 lane; }; +struct mlxsw_sp_parsing { + refcount_t parsing_depth_ref; + u16 parsing_depth; + u16 vxlan_udp_dport; + struct mutex lock; /* Protects parsing configuration */ +}; + struct mlxsw_sp { struct mlxsw_sp_port **ports; struct mlxsw_core *core; @@ -173,6 +180,7 @@ struct mlxsw_sp { struct mlxsw_sp_counter_pool *counter_pool; struct mlxsw_sp_span *span; struct mlxsw_sp_trap *trap; + struct mlxsw_sp_parsing parsing; const struct mlxsw_sp_switchdev_ops *switchdev_ops; const struct mlxsw_sp_kvdl_ops *kvdl_ops; const struct mlxsw_afa_ops *afa_ops; @@ -652,6 +660,10 @@ struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev); struct mlxsw_sp_port 
*mlxsw_sp_port_lower_dev_hold(struct net_device *dev); void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port); struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev); +int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp); +void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp); +int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp, + __be16 udp_dport); /* spectrum_dcb.c */ #ifdef CONFIG_MLXSW_SPECTRUM_DCB diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index d8104fc6c900..98d1fdc25eac 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -29,7 +29,6 @@ struct mlxsw_sp_nve { unsigned int num_max_mc_entries[MLXSW_SP_L3_PROTO_MAX]; u32 tunnel_index; u16 ul_rif_index; /* Reserved for Spectrum */ - unsigned int inc_parsing_depth_refs; }; struct mlxsw_sp_nve_ops { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index b84bb4b65098..d018d2da5949 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -10,14 +10,6 @@ #include "spectrum.h" #include "spectrum_nve.h" -/* Eth (18B) | IPv6 (40B) | UDP (8B) | VxLAN (8B) | Eth (14B) | IPv6 (40B) - * - * In the worst case - where we have a VLAN tag on the outer Ethernet - * header and IPv6 in overlay and underlay - we need to parse 128 bytes - */ -#define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128 -#define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96 - #define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ VXLAN_F_LEARN) @@ -115,66 +107,6 @@ static void mlxsw_sp_nve_vxlan_config(const struct mlxsw_sp_nve *nve, config->udp_dport = cfg->dst_port; } -static int __mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, - unsigned int parsing_depth, - __be16 udp_dport) -{ - char mprs_pl[MLXSW_REG_MPRS_LEN]; - - mlxsw_reg_mprs_pack(mprs_pl, parsing_depth, be16_to_cpu(udp_dport)); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl); -} - -static int mlxsw_sp_nve_parsing_set(struct mlxsw_sp *mlxsw_sp, - __be16 udp_dport) -{ - int parsing_depth = mlxsw_sp->nve->inc_parsing_depth_refs ? 
- MLXSW_SP_NVE_VXLAN_PARSING_DEPTH : - MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH; - - return __mlxsw_sp_nve_parsing_set(mlxsw_sp, parsing_depth, udp_dport); -} - -static int -__mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp, - __be16 udp_dport) -{ - int err; - - mlxsw_sp->nve->inc_parsing_depth_refs++; - - err = mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport); - if (err) - goto err_nve_parsing_set; - return 0; - -err_nve_parsing_set: - mlxsw_sp->nve->inc_parsing_depth_refs--; - return err; -} - -static void -__mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp, - __be16 udp_dport) -{ - mlxsw_sp->nve->inc_parsing_depth_refs--; - mlxsw_sp_nve_parsing_set(mlxsw_sp, udp_dport); -} - -int mlxsw_sp_nve_inc_parsing_depth_get(struct mlxsw_sp *mlxsw_sp) -{ - __be16 udp_dport = mlxsw_sp->nve->config.udp_dport; - - return __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, udp_dport); -} - -void mlxsw_sp_nve_inc_parsing_depth_put(struct mlxsw_sp *mlxsw_sp) -{ - __be16 udp_dport = mlxsw_sp->nve->config.udp_dport; - - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, udp_dport); -} - static void mlxsw_sp_nve_vxlan_config_prepare(char *tngcr_pl, const struct mlxsw_sp_nve_config *config) @@ -238,10 +170,14 @@ static int mlxsw_sp1_nve_vxlan_init(struct mlxsw_sp_nve *nve, struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; int err; - err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport); + err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport); if (err) return err; + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); + if (err) + goto err_parsing_depth_inc; + err = mlxsw_sp1_nve_vxlan_config_set(mlxsw_sp, config); if (err) goto err_config_set; @@ -263,7 +199,9 @@ err_promote_decap: err_rtdp_set: mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); err_config_set: - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +err_parsing_depth_inc: + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); return err; } @@ -275,7 +213,8 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip); mlxsw_sp1_nve_vxlan_config_clear(mlxsw_sp); - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); } static int @@ -412,10 +351,14 @@ static int mlxsw_sp2_nve_vxlan_init(struct mlxsw_sp_nve *nve, struct mlxsw_sp *mlxsw_sp = nve->mlxsw_sp; int err; - err = __mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp, config->udp_dport); + err = mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, config->udp_dport); if (err) return err; + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); + if (err) + goto err_parsing_depth_inc; + err = mlxsw_sp2_nve_vxlan_config_set(mlxsw_sp, config); if (err) goto err_config_set; @@ -438,7 +381,9 @@ err_promote_decap: err_rtdp_set: mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); err_config_set: - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); +err_parsing_depth_inc: + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); return err; } @@ -450,7 +395,8 @@ static void mlxsw_sp2_nve_vxlan_fini(struct mlxsw_sp_nve *nve) mlxsw_sp_router_nve_demote_decap(mlxsw_sp, config->ul_tb_id, config->ul_proto, &config->ul_sip); mlxsw_sp2_nve_vxlan_config_clear(mlxsw_sp); - __mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp, 0); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); + mlxsw_sp_parsing_vxlan_udp_dport_set(mlxsw_sp, 0); } const struct mlxsw_sp_nve_ops 
mlxsw_sp2_nve_vxlan_ops = { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c index bfef65d1587c..1a180384e7e8 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_ptp.c @@ -975,14 +975,14 @@ static int mlxsw_sp1_ptp_mtpppc_update(struct mlxsw_sp_port *mlxsw_sp_port, } if ((ing_types || egr_types) && !(orig_ing_types || orig_egr_types)) { - err = mlxsw_sp_nve_inc_parsing_depth_get(mlxsw_sp); + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); if (err) { netdev_err(mlxsw_sp_port->dev, "Failed to increase parsing depth"); return err; } } if (!(ing_types || egr_types) && (orig_ing_types || orig_egr_types)) - mlxsw_sp_nve_inc_parsing_depth_put(mlxsw_sp); + mlxsw_sp_parsing_depth_dec(mlxsw_sp); return mlxsw_sp1_ptp_mtpppc_set(mlxsw_sp_port->mlxsw_sp, ing_types, egr_types); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index f69cbb3852d5..19bb3ca0515e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -9484,6 +9484,7 @@ struct mlxsw_sp_mp_hash_config { DECLARE_BITMAP(fields, __MLXSW_REG_RECR2_FIELD_CNT); DECLARE_BITMAP(inner_headers, __MLXSW_REG_RECR2_HEADER_CNT); DECLARE_BITMAP(inner_fields, __MLXSW_REG_RECR2_INNER_FIELD_CNT); + bool inc_parsing_depth; }; #define MLXSW_SP_MP_HASH_HEADER_SET(_headers, _header) \ @@ -9654,6 +9655,7 @@ static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, MLXSW_SP_MP_HASH_FIELD_SET(fields, IPV6_FLOW_LABEL); /* Inner */ mlxsw_sp_mp_hash_inner_l3(config); + config->inc_parsing_depth = true; break; case 3: /* Outer */ @@ -9678,22 +9680,53 @@ static void mlxsw_sp_mp6_hash_init(struct mlxsw_sp *mlxsw_sp, MLXSW_SP_MP_HASH_FIELD_SET(fields, TCP_UDP_DPORT); /* Inner */ mlxsw_sp_mp_hash_inner_custom(config, hash_fields); + if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK) + config->inc_parsing_depth = true; break; } } +static int mlxsw_sp_mp_hash_parsing_depth_adjust(struct mlxsw_sp *mlxsw_sp, + bool old_inc_parsing_depth, + bool new_inc_parsing_depth) +{ + int err; + + if (!old_inc_parsing_depth && new_inc_parsing_depth) { + err = mlxsw_sp_parsing_depth_inc(mlxsw_sp); + if (err) + return err; + mlxsw_sp->router->inc_parsing_depth = true; + } else if (old_inc_parsing_depth && !new_inc_parsing_depth) { + mlxsw_sp_parsing_depth_dec(mlxsw_sp); + mlxsw_sp->router->inc_parsing_depth = false; + } + + return 0; +} + static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) { + bool old_inc_parsing_depth, new_inc_parsing_depth; struct mlxsw_sp_mp_hash_config config = {}; char recr2_pl[MLXSW_REG_RECR2_LEN]; unsigned long bit; u32 seed; + int err; seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac), 0); mlxsw_reg_recr2_pack(recr2_pl, seed); mlxsw_sp_mp4_hash_init(mlxsw_sp, &config); mlxsw_sp_mp6_hash_init(mlxsw_sp, &config); + old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth; + new_inc_parsing_depth = config.inc_parsing_depth; + err = mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, + old_inc_parsing_depth, + new_inc_parsing_depth); + if (err) + return err; + for_each_set_bit(bit, config.headers, __MLXSW_REG_RECR2_HEADER_CNT) mlxsw_reg_recr2_outer_header_enables_set(recr2_pl, bit, 1); for_each_set_bit(bit, config.fields, __MLXSW_REG_RECR2_FIELD_CNT) @@ -9703,7 +9736,16 @@ static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) for_each_set_bit(bit, config.inner_fields, 
__MLXSW_REG_RECR2_INNER_FIELD_CNT) mlxsw_reg_recr2_inner_header_fields_enable_set(recr2_pl, bit, 1); - return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(recr2), recr2_pl); + if (err) + goto err_reg_write; + + return 0; + +err_reg_write: + mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, new_inc_parsing_depth, + old_inc_parsing_depth); + return err; } #else static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h index c5d7007f9173..25d3eae63501 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.h @@ -81,6 +81,7 @@ struct mlxsw_sp_router { size_t adj_grp_size_ranges_count; struct delayed_work nh_grp_activity_dw; struct list_head nh_res_grp_list; + bool inc_parsing_depth; }; struct mlxsw_sp_fib_entry_priv { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 8f90cd323d5f..22fede5cb32c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -335,14 +335,16 @@ mlxsw_sp_bridge_port_find(struct mlxsw_sp_bridge *bridge, static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, - struct net_device *brport_dev) + struct net_device *brport_dev, + struct netlink_ext_ack *extack) { struct mlxsw_sp_bridge_port *bridge_port; struct mlxsw_sp_port *mlxsw_sp_port; + int err; bridge_port = kzalloc(sizeof(*bridge_port), GFP_KERNEL); if (!bridge_port) - return NULL; + return ERR_PTR(-ENOMEM); mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(brport_dev); bridge_port->lagged = mlxsw_sp_port->lagged; @@ -359,12 +361,23 @@ mlxsw_sp_bridge_port_create(struct mlxsw_sp_bridge_device *bridge_device, list_add(&bridge_port->list, &bridge_device->ports_list); bridge_port->ref_count = 1; + err = switchdev_bridge_port_offload(brport_dev, mlxsw_sp_port->dev, + NULL, NULL, NULL, false, extack); + if (err) + goto err_switchdev_offload; + return bridge_port; + +err_switchdev_offload: + list_del(&bridge_port->list); + kfree(bridge_port); + return ERR_PTR(err); } static void mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) { + switchdev_bridge_port_unoffload(bridge_port->dev, NULL, NULL, NULL); list_del(&bridge_port->list); WARN_ON(!list_empty(&bridge_port->vlans_list)); kfree(bridge_port); @@ -390,9 +403,10 @@ mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, if (IS_ERR(bridge_device)) return ERR_CAST(bridge_device); - bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev); - if (!bridge_port) { - err = -ENOMEM; + bridge_port = mlxsw_sp_bridge_port_create(bridge_device, brport_dev, + extack); + if (IS_ERR(bridge_port)) { + err = PTR_ERR(bridge_port); goto err_bridge_port_create; } @@ -1569,7 +1583,6 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, { long *flood_bitmap; int num_of_ports; - int alloc_size; u16 mid_idx; int err; @@ -1579,18 +1592,17 @@ mlxsw_sp_mc_write_mdb_entry(struct mlxsw_sp *mlxsw_sp, return false; num_of_ports = mlxsw_core_max_ports(mlxsw_sp->core); - alloc_size = sizeof(long) * BITS_TO_LONGS(num_of_ports); - flood_bitmap = kzalloc(alloc_size, GFP_KERNEL); + flood_bitmap = bitmap_alloc(num_of_ports, GFP_KERNEL); if (!flood_bitmap) return false; - bitmap_copy(flood_bitmap, 
mid->ports_in_mid, num_of_ports); + bitmap_copy(flood_bitmap, mid->ports_in_mid, num_of_ports); mlxsw_sp_mc_get_mrouters_bitmap(flood_bitmap, bridge_device, mlxsw_sp); mid->mid = mid_idx; err = mlxsw_sp_port_smid_full_entry(mlxsw_sp, mid_idx, flood_bitmap, bridge_device->mrouter); - kfree(flood_bitmap); + bitmap_free(flood_bitmap); if (err) return false; diff --git a/drivers/net/ethernet/micrel/ks8851_common.c b/drivers/net/ethernet/micrel/ks8851_common.c index 831518466de2..3f69bb59ba49 100644 --- a/drivers/net/ethernet/micrel/ks8851_common.c +++ b/drivers/net/ethernet/micrel/ks8851_common.c @@ -689,7 +689,7 @@ static int ks8851_net_ioctl(struct net_device *dev, struct ifreq *req, int cmd) static const struct net_device_ops ks8851_netdev_ops = { .ndo_open = ks8851_net_open, .ndo_stop = ks8851_net_stop, - .ndo_do_ioctl = ks8851_net_ioctl, + .ndo_eth_ioctl = ks8851_net_ioctl, .ndo_start_xmit = ks8851_start_xmit, .ndo_set_mac_address = ks8851_set_mac_address, .ndo_set_rx_mode = ks8851_set_rx_mode, diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 7945eb5e2fe8..a0ee155f9f51 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6738,7 +6738,7 @@ static const struct net_device_ops netdev_ops = { .ndo_set_features = netdev_set_features, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_set_rx_mode = netdev_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = netdev_netpoll, diff --git a/drivers/net/ethernet/microchip/Kconfig b/drivers/net/ethernet/microchip/Kconfig index d54aa164c4e9..735eea1dacf1 100644 --- a/drivers/net/ethernet/microchip/Kconfig +++ b/drivers/net/ethernet/microchip/Kconfig @@ -45,6 +45,7 @@ config ENCX24J600 config LAN743X tristate "LAN743x support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select PHYLIB select CRC16 select CRC32 diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c index dae10328c6cf..9e8561cdc32a 100644 --- a/drivers/net/ethernet/microchip/lan743x_main.c +++ b/drivers/net/ethernet/microchip/lan743x_main.c @@ -2655,7 +2655,7 @@ static const struct net_device_ops lan743x_netdev_ops = { .ndo_open = lan743x_netdev_open, .ndo_stop = lan743x_netdev_close, .ndo_start_xmit = lan743x_netdev_xmit_frame, - .ndo_do_ioctl = lan743x_netdev_ioctl, + .ndo_eth_ioctl = lan743x_netdev_ioctl, .ndo_set_rx_mode = lan743x_netdev_set_multicast, .ndo_change_mtu = lan743x_netdev_change_mtu, .ndo_get_stats64 = lan743x_netdev_get_stats64, diff --git a/drivers/net/ethernet/microchip/sparx5/Makefile b/drivers/net/ethernet/microchip/sparx5/Makefile index faa8f07a6b75..c271e86ee292 100644 --- a/drivers/net/ethernet/microchip/sparx5/Makefile +++ b/drivers/net/ethernet/microchip/sparx5/Makefile @@ -7,4 +7,4 @@ obj-$(CONFIG_SPARX5_SWITCH) += sparx5-switch.o sparx5-switch-objs := sparx5_main.o sparx5_packet.o \ sparx5_netdev.o sparx5_phylink.o sparx5_port.o sparx5_mactable.o sparx5_vlan.o \ - sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o + sparx5_switchdev.o sparx5_calendar.o sparx5_ethtool.o sparx5_fdma.o diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c new file mode 100644 index 000000000000..7436f62fa152 --- /dev/null +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_fdma.c @@ -0,0 +1,593 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* 
Microchip Sparx5 Switch driver + * + * Copyright (c) 2021 Microchip Technology Inc. and its subsidiaries. + * + * The Sparx5 Chip Register Model can be browsed at this location: + * https://github.com/microchip-ung/sparx-5_reginfo + */ + +#include <linux/types.h> +#include <linux/skbuff.h> +#include <linux/netdevice.h> +#include <linux/interrupt.h> +#include <linux/ip.h> +#include <linux/dma-mapping.h> + +#include "sparx5_main_regs.h" +#include "sparx5_main.h" +#include "sparx5_port.h" + +#define FDMA_XTR_CHANNEL 6 +#define FDMA_INJ_CHANNEL 0 + +#define FDMA_DCB_INFO_DATAL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_INFO_TOKEN BIT(17) +#define FDMA_DCB_INFO_INTR BIT(18) +#define FDMA_DCB_INFO_SW(x) (((x) << 24) & GENMASK(31, 24)) + +#define FDMA_DCB_STATUS_BLOCKL(x) ((x) & GENMASK(15, 0)) +#define FDMA_DCB_STATUS_SOF BIT(16) +#define FDMA_DCB_STATUS_EOF BIT(17) +#define FDMA_DCB_STATUS_INTR BIT(18) +#define FDMA_DCB_STATUS_DONE BIT(19) +#define FDMA_DCB_STATUS_BLOCKO(x) (((x) << 20) & GENMASK(31, 20)) +#define FDMA_DCB_INVALID_DATA 0x1 + +#define FDMA_XTR_BUFFER_SIZE 2048 +#define FDMA_WEIGHT 4 + +/* Frame DMA DCB format + * + * +---------------------------+ + * | Next Ptr | + * +---------------------------+ + * | Reserved | Info | + * +---------------------------+ + * | Data0 Ptr | + * +---------------------------+ + * | Reserved | Status0 | + * +---------------------------+ + * | Data1 Ptr | + * +---------------------------+ + * | Reserved | Status1 | + * +---------------------------+ + * | Data2 Ptr | + * +---------------------------+ + * | Reserved | Status2 | + * |-------------|-------------| + * | | + * | | + * | | + * | | + * | | + * |---------------------------| + * | Data14 Ptr | + * +-------------|-------------+ + * | Reserved | Status14 | + * +-------------|-------------+ + */ + +/* For each hardware DB there is an entry in this list and when the HW DB + * entry is used, this SW DB entry is moved to the back of the list + */ +struct sparx5_db { + struct list_head list; + void *cpu_addr; +}; + +static void sparx5_fdma_rx_add_dcb(struct sparx5_rx *rx, + struct sparx5_rx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_RX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_INTR; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); + rx->last_entry->nextptr = nextptr; + rx->last_entry = dcb; +} + +static void sparx5_fdma_tx_add_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb, + u64 nextptr) +{ + int idx = 0; + + /* Reset the status of the DB */ + for (idx = 0; idx < FDMA_TX_DCB_MAX_DBS; ++idx) { + struct sparx5_db_hw *db = &dcb->db[idx]; + + db->status = FDMA_DCB_STATUS_DONE; + } + dcb->nextptr = FDMA_DCB_INVALID_DATA; + dcb->info = FDMA_DCB_INFO_DATAL(FDMA_XTR_BUFFER_SIZE); +} + +static void sparx5_fdma_rx_activate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)rx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(rx->channel_id)); + spx5_wr(((u64)rx->dma) >> 32, sparx5, FDMA_DCB_LLP1(rx->channel_id)); + + /* Set the number of RX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_RX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(XTR_QUEUE), + sparx5, FDMA_CH_CFG(rx->channel_id)); + + /* Set the RX Watermark to max */ + spx5_rmw(FDMA_XTR_CFG_XTR_FIFO_WM_SET(31), 
FDMA_XTR_CFG_XTR_FIFO_WM, + sparx5, + FDMA_XTR_CFG); + + /* Start RX fdma */ + spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(0), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Enable RX channel DB interrupt */ + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Activate the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_deactivate(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Dectivate the RX channel */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); + + /* Disable RX channel DB interrupt */ + spx5_rmw(0, BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + + /* Stop RX fdma */ + spx5_rmw(FDMA_PORT_CTRL_XTR_STOP_SET(1), FDMA_PORT_CTRL_XTR_STOP, + sparx5, FDMA_PORT_CTRL(0)); +} + +static void sparx5_fdma_tx_activate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Write the buffer address in the LLP and LLP1 regs */ + spx5_wr(((u64)tx->dma) & GENMASK(31, 0), sparx5, + FDMA_DCB_LLP(tx->channel_id)); + spx5_wr(((u64)tx->dma) >> 32, sparx5, FDMA_DCB_LLP1(tx->channel_id)); + + /* Set the number of TX DBs to be used, and DB end-of-frame interrupt */ + spx5_wr(FDMA_CH_CFG_CH_DCB_DB_CNT_SET(FDMA_TX_DCB_MAX_DBS) | + FDMA_CH_CFG_CH_INTR_DB_EOF_ONLY_SET(1) | + FDMA_CH_CFG_CH_INJ_PORT_SET(INJ_QUEUE), + sparx5, FDMA_CH_CFG(tx->channel_id)); + + /* Start TX fdma */ + spx5_rmw(FDMA_PORT_CTRL_INJ_STOP_SET(0), FDMA_PORT_CTRL_INJ_STOP, + sparx5, FDMA_PORT_CTRL(0)); + + /* Activate the channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_tx_deactivate(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Disable the channel */ + spx5_rmw(0, BIT(tx->channel_id) & FDMA_CH_ACTIVATE_CH_ACTIVATE, + sparx5, FDMA_CH_ACTIVATE); +} + +static void sparx5_fdma_rx_reload(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + /* Reload the RX channel */ + spx5_wr(BIT(rx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static void sparx5_fdma_tx_reload(struct sparx5 *sparx5, struct sparx5_tx *tx) +{ + /* Reload the TX channel */ + spx5_wr(BIT(tx->channel_id), sparx5, FDMA_CH_RELOAD); +} + +static struct sk_buff *sparx5_fdma_rx_alloc_skb(struct sparx5_rx *rx) +{ + return __netdev_alloc_skb(rx->ndev, FDMA_XTR_BUFFER_SIZE, + GFP_ATOMIC); +} + +static bool sparx5_fdma_rx_get_frame(struct sparx5 *sparx5, struct sparx5_rx *rx) +{ + struct sparx5_db_hw *db_hw; + unsigned int packet_size; + struct sparx5_port *port; + struct sk_buff *new_skb; + struct frame_info fi; + struct sk_buff *skb; + dma_addr_t dma_addr; + + /* Check if the DCB is done */ + db_hw = &rx->dcb_entries[rx->dcb_index].db[rx->db_index]; + if (unlikely(!(db_hw->status & FDMA_DCB_STATUS_DONE))) + return false; + skb = rx->skb[rx->dcb_index][rx->db_index]; + /* Replace the DB entry with a new SKB */ + new_skb = sparx5_fdma_rx_alloc_skb(rx); + if (unlikely(!new_skb)) + return false; + /* Map the new skb data and set the new skb */ + dma_addr = virt_to_phys(new_skb->data); + rx->skb[rx->dcb_index][rx->db_index] = new_skb; + db_hw->dataptr = dma_addr; + packet_size = FDMA_DCB_STATUS_BLOCKL(db_hw->status); + skb_put(skb, packet_size); + /* Now do the normal processing of the skb */ + sparx5_ifh_parse((u32 *)skb->data, &fi); + /* Map to port netdev */ + port = fi.src_port < SPX5_PORTS ? 
sparx5->ports[fi.src_port] : NULL; + if (!port || !port->ndev) { + dev_err(sparx5->dev, "Data on inactive port %d\n", fi.src_port); + sparx5_xtr_flush(sparx5, XTR_QUEUE); + return false; + } + skb->dev = port->ndev; + skb_pull(skb, IFH_LEN * sizeof(u32)); + if (likely(!(skb->dev->features & NETIF_F_RXFCS))) + skb_trim(skb, skb->len - ETH_FCS_LEN); + skb->protocol = eth_type_trans(skb, skb->dev); + /* Everything we see on an interface that is in the HW bridge + * has already been forwarded + */ + if (test_bit(port->portno, sparx5->bridge_mask)) + skb->offload_fwd_mark = 1; + skb->dev->stats.rx_bytes += skb->len; + skb->dev->stats.rx_packets++; + rx->packets++; + netif_receive_skb(skb); + return true; +} + +static int sparx5_fdma_napi_callback(struct napi_struct *napi, int weight) +{ + struct sparx5_rx *rx = container_of(napi, struct sparx5_rx, napi); + struct sparx5 *sparx5 = container_of(rx, struct sparx5, rx); + int counter = 0; + + while (counter < weight && sparx5_fdma_rx_get_frame(sparx5, rx)) { + struct sparx5_rx_dcb_hw *old_dcb; + + rx->db_index++; + counter++; + /* Check if the DCB can be reused */ + if (rx->db_index != FDMA_RX_DCB_MAX_DBS) + continue; + /* As the DCB can be reused, just advance the dcb_index + * pointer and set the nextptr in the DCB + */ + rx->db_index = 0; + old_dcb = &rx->dcb_entries[rx->dcb_index]; + rx->dcb_index++; + rx->dcb_index &= FDMA_DCB_MAX - 1; + sparx5_fdma_rx_add_dcb(rx, old_dcb, + rx->dma + + ((unsigned long)old_dcb - + (unsigned long)rx->dcb_entries)); + } + if (counter < weight) { + napi_complete_done(&rx->napi, counter); + spx5_rmw(BIT(rx->channel_id), + BIT(rx->channel_id) & FDMA_INTR_DB_ENA_INTR_DB_ENA, + sparx5, FDMA_INTR_DB_ENA); + } + if (counter) + sparx5_fdma_rx_reload(sparx5, rx); + return counter; +} + +static struct sparx5_tx_dcb_hw *sparx5_fdma_next_dcb(struct sparx5_tx *tx, + struct sparx5_tx_dcb_hw *dcb) +{ + struct sparx5_tx_dcb_hw *next_dcb; + + next_dcb = dcb; + next_dcb++; + /* Handle wrap-around */ + if ((unsigned long)next_dcb >= + ((unsigned long)tx->first_entry + FDMA_DCB_MAX * sizeof(*dcb))) + next_dcb = tx->first_entry; + return next_dcb; +} + +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb) +{ + struct sparx5_tx_dcb_hw *next_dcb_hw; + struct sparx5_tx *tx = &sparx5->tx; + static bool first_time = true; + struct sparx5_db_hw *db_hw; + struct sparx5_db *db; + + next_dcb_hw = sparx5_fdma_next_dcb(tx, tx->curr_entry); + db_hw = &next_dcb_hw->db[0]; + if (!(db_hw->status & FDMA_DCB_STATUS_DONE)) + tx->dropped++; + db = list_first_entry(&tx->db_list, struct sparx5_db, list); + list_move_tail(&db->list, &tx->db_list); + next_dcb_hw->nextptr = FDMA_DCB_INVALID_DATA; + tx->curr_entry->nextptr = tx->dma + + ((unsigned long)next_dcb_hw - + (unsigned long)tx->first_entry); + tx->curr_entry = next_dcb_hw; + memset(db->cpu_addr, 0, FDMA_XTR_BUFFER_SIZE); + memcpy(db->cpu_addr, ifh, IFH_LEN * 4); + memcpy(db->cpu_addr + IFH_LEN * 4, skb->data, skb->len); + db_hw->status = FDMA_DCB_STATUS_SOF | + FDMA_DCB_STATUS_EOF | + FDMA_DCB_STATUS_BLOCKO(0) | + FDMA_DCB_STATUS_BLOCKL(skb->len + IFH_LEN * 4 + 4); + if (first_time) { + sparx5_fdma_tx_activate(sparx5, tx); + first_time = false; + } else { + sparx5_fdma_tx_reload(sparx5, tx); + } + return NETDEV_TX_OK; +} + +static int sparx5_fdma_rx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_rx *rx = &sparx5->rx; + struct sparx5_rx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_rx_dcb_hw) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + 
rx->dcb_entries = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!rx->dcb_entries) + return -ENOMEM; + rx->dma = virt_to_phys(rx->dcb_entries); + rx->last_entry = rx->dcb_entries; + rx->db_index = 0; + rx->dcb_index = 0; + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &rx->dcb_entries[idx]; + dcb->info = 0; + /* For each db allocate an skb and map skb data pointer to the DB + * dataptr. In this way when the frame is received the skb->data + * will contain the frame, so no memcpy is needed + */ + for (jdx = 0; jdx < FDMA_RX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + dma_addr_t dma_addr; + struct sk_buff *skb; + + skb = sparx5_fdma_rx_alloc_skb(rx); + if (!skb) + return -ENOMEM; + + dma_addr = virt_to_phys(skb->data); + db_hw->dataptr = dma_addr; + db_hw->status = 0; + rx->skb[idx][jdx] = skb; + } + sparx5_fdma_rx_add_dcb(rx, dcb, rx->dma + sizeof(*dcb) * idx); + } + netif_napi_add(rx->ndev, &rx->napi, sparx5_fdma_napi_callback, FDMA_WEIGHT); + napi_enable(&rx->napi); + sparx5_fdma_rx_activate(sparx5, rx); + return 0; +} + +static int sparx5_fdma_tx_alloc(struct sparx5 *sparx5) +{ + struct sparx5_tx *tx = &sparx5->tx; + struct sparx5_tx_dcb_hw *dcb; + int idx, jdx; + int size; + + size = sizeof(struct sparx5_tx_dcb_hw) * FDMA_DCB_MAX; + size = ALIGN(size, PAGE_SIZE); + tx->curr_entry = devm_kzalloc(sparx5->dev, size, GFP_KERNEL); + if (!tx->curr_entry) + return -ENOMEM; + tx->dma = virt_to_phys(tx->curr_entry); + tx->first_entry = tx->curr_entry; + INIT_LIST_HEAD(&tx->db_list); + /* Now for each dcb allocate the db */ + for (idx = 0; idx < FDMA_DCB_MAX; ++idx) { + dcb = &tx->curr_entry[idx]; + dcb->info = 0; + /* TX databuffers must be 16byte aligned */ + for (jdx = 0; jdx < FDMA_TX_DCB_MAX_DBS; ++jdx) { + struct sparx5_db_hw *db_hw = &dcb->db[jdx]; + struct sparx5_db *db; + dma_addr_t phys; + void *cpu_addr; + + cpu_addr = devm_kzalloc(sparx5->dev, + FDMA_XTR_BUFFER_SIZE, + GFP_KERNEL); + if (!cpu_addr) + return -ENOMEM; + phys = virt_to_phys(cpu_addr); + db_hw->dataptr = phys; + db_hw->status = 0; + db = devm_kzalloc(sparx5->dev, sizeof(*db), GFP_KERNEL); + db->cpu_addr = cpu_addr; + list_add_tail(&db->list, &tx->db_list); + } + sparx5_fdma_tx_add_dcb(tx, dcb, tx->dma + sizeof(*dcb) * idx); + /* Let the curr_entry to point to the last allocated entry */ + if (idx == FDMA_DCB_MAX - 1) + tx->curr_entry = dcb; + } + return 0; +} + +static void sparx5_fdma_rx_init(struct sparx5 *sparx5, + struct sparx5_rx *rx, int channel) +{ + int idx; + + rx->channel_id = channel; + /* Fetch a netdev for SKB and NAPI use, any will do */ + for (idx = 0; idx < SPX5_PORTS; ++idx) { + struct sparx5_port *port = sparx5->ports[idx]; + + if (port && port->ndev) { + rx->ndev = port->ndev; + break; + } + } +} + +static void sparx5_fdma_tx_init(struct sparx5 *sparx5, + struct sparx5_tx *tx, int channel) +{ + tx->channel_id = channel; +} + +irqreturn_t sparx5_fdma_handler(int irq, void *args) +{ + struct sparx5 *sparx5 = args; + u32 db = 0, err = 0; + + db = spx5_rd(sparx5, FDMA_INTR_DB); + err = spx5_rd(sparx5, FDMA_INTR_ERR); + /* Clear interrupt */ + if (db) { + spx5_wr(0, sparx5, FDMA_INTR_DB_ENA); + spx5_wr(db, sparx5, FDMA_INTR_DB); + napi_schedule(&sparx5->rx.napi); + } + if (err) { + u32 err_type = spx5_rd(sparx5, FDMA_ERRORS); + + dev_err_ratelimited(sparx5->dev, + "ERR: int: %#x, type: %#x\n", + err, err_type); + spx5_wr(err, sparx5, FDMA_INTR_ERR); + spx5_wr(err_type, sparx5, FDMA_ERRORS); + } + return IRQ_HANDLED; +} + +static void 
sparx5_fdma_injection_mode(struct sparx5 *sparx5) +{ + const int byte_swap = 1; + int portno; + int urgency; + + /* Change mode to fdma extraction and injection */ + spx5_wr(QS_XTR_GRP_CFG_MODE_SET(2) | + QS_XTR_GRP_CFG_STATUS_WORD_POS_SET(1) | + QS_XTR_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_XTR_GRP_CFG(XTR_QUEUE)); + spx5_wr(QS_INJ_GRP_CFG_MODE_SET(2) | + QS_INJ_GRP_CFG_BYTE_SWAP_SET(byte_swap), + sparx5, QS_INJ_GRP_CFG(INJ_QUEUE)); + + /* CPU ports capture setup */ + for (portno = SPX5_PORT_CPU_0; portno <= SPX5_PORT_CPU_1; portno++) { + /* ASM CPU port: No preamble, IFH, enable padding */ + spx5_wr(ASM_PORT_CFG_PAD_ENA_SET(1) | + ASM_PORT_CFG_NO_PREAMBLE_ENA_SET(1) | + ASM_PORT_CFG_INJ_FORMAT_CFG_SET(1), /* 1 = IFH */ + sparx5, ASM_PORT_CFG(portno)); + + /* Reset WM cnt to unclog queued frames */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR_SET(1), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_CNT_CLR, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Set Disassembler Stop Watermark level */ + spx5_rmw(DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM_SET(100), + DSM_DEV_TX_STOP_WM_CFG_DEV_TX_STOP_WM, + sparx5, + DSM_DEV_TX_STOP_WM_CFG(portno)); + + /* Enable port in queue system */ + urgency = sparx5_port_fwd_urg(sparx5, SPEED_2500); + spx5_rmw(QFWD_SWITCH_PORT_MODE_PORT_ENA_SET(1) | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY_SET(urgency), + QFWD_SWITCH_PORT_MODE_PORT_ENA | + QFWD_SWITCH_PORT_MODE_FWD_URGENCY, + sparx5, + QFWD_SWITCH_PORT_MODE(portno)); + + /* Disable Disassembler buffer underrun watchdog + * to avoid truncated packets in XTR + */ + spx5_rmw(DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS_SET(1), + DSM_BUF_CFG_UNDERFLOW_WATCHDOG_DIS, + sparx5, + DSM_BUF_CFG(portno)); + + /* Disabling frame aging */ + spx5_rmw(HSCH_PORT_MODE_AGE_DIS_SET(1), + HSCH_PORT_MODE_AGE_DIS, + sparx5, + HSCH_PORT_MODE(portno)); + } +} + +int sparx5_fdma_start(struct sparx5 *sparx5) +{ + int err; + + /* Reset FDMA state */ + spx5_wr(FDMA_CTRL_NRESET_SET(0), sparx5, FDMA_CTRL); + spx5_wr(FDMA_CTRL_NRESET_SET(1), sparx5, FDMA_CTRL); + + /* Force ACP caching but disable read/write allocation */ + spx5_rmw(CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA_SET(1) | + CPU_PROC_CTRL_ACP_AWCACHE_SET(0) | + CPU_PROC_CTRL_ACP_ARCACHE_SET(0), + CPU_PROC_CTRL_ACP_CACHE_FORCE_ENA | + CPU_PROC_CTRL_ACP_AWCACHE | + CPU_PROC_CTRL_ACP_ARCACHE, + sparx5, CPU_PROC_CTRL); + + sparx5_fdma_injection_mode(sparx5); + sparx5_fdma_rx_init(sparx5, &sparx5->rx, FDMA_XTR_CHANNEL); + sparx5_fdma_tx_init(sparx5, &sparx5->tx, FDMA_INJ_CHANNEL); + err = sparx5_fdma_rx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate RX buffers: %d\n", err); + return err; + } + err = sparx5_fdma_tx_alloc(sparx5); + if (err) { + dev_err(sparx5->dev, "Could not allocate TX buffers: %d\n", err); + return err; + } + return err; +} + +static u32 sparx5_fdma_port_ctrl(struct sparx5 *sparx5) +{ + return spx5_rd(sparx5, FDMA_PORT_CTRL(0)); +} + +int sparx5_fdma_stop(struct sparx5 *sparx5) +{ + u32 val; + + napi_disable(&sparx5->rx.napi); + /* Stop the fdma and channel interrupts */ + sparx5_fdma_rx_deactivate(sparx5, &sparx5->rx); + sparx5_fdma_tx_deactivate(sparx5, &sparx5->tx); + /* Wait for the RX channel to stop */ + read_poll_timeout(sparx5_fdma_port_ctrl, val, + FDMA_PORT_CTRL_XTR_BUF_IS_EMPTY_GET(val) == 0, + 500, 10000, 0, sparx5); + return 0; +} diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index f666133a15de..cbece6e9bff2 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ 
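sparx5_fdma_stop() above waits on the extraction buffer with read_poll_timeout(), re-reading FDMA_PORT_CTRL every 500us for at most 10ms. The stand-alone sketch below only models that bounded-polling pattern; poll_until(), fake_port_ctrl() and xtr_buf_drained() are local stand-ins, not driver or kernel symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_port_ctrl(void)        { return 0; }            /* stand-in for the FDMA_PORT_CTRL read */
static bool     xtr_buf_drained(uint32_t v) { return (v & 1) == 0; } /* stand-in for the status test */

static int poll_until(bool (*done)(uint32_t), uint32_t (*read_status)(void),
                      unsigned int sleep_us, unsigned int timeout_us)
{
        unsigned int waited = 0;

        for (;;) {
                uint32_t val = read_status();

                if (done(val))
                        return 0;
                if (waited >= timeout_us)
                        return -1;      /* read_poll_timeout() returns -ETIMEDOUT here */
                usleep(sleep_us);
                waited += sleep_us;
        }
}

int main(void)
{
        /* same bounds as the driver: poll every 500us, give up after 10ms */
        printf("%d\n", poll_until(xtr_buf_drained, fake_port_ctrl, 500, 10000));
        return 0;
}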
b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -640,8 +640,23 @@ static int sparx5_start(struct sparx5 *sparx5) sparx5_board_init(sparx5); err = sparx5_register_notifier_blocks(sparx5); - /* Start register based INJ/XTR */ + /* Start Frame DMA with fallback to register based INJ/XTR */ err = -ENXIO; + if (sparx5->fdma_irq >= 0) { + if (GCB_CHIP_ID_REV_ID_GET(sparx5->chip_id) > 0) + err = devm_request_threaded_irq(sparx5->dev, + sparx5->fdma_irq, + NULL, + sparx5_fdma_handler, + IRQF_ONESHOT, + "sparx5-fdma", sparx5); + if (!err) + err = sparx5_fdma_start(sparx5); + if (err) + sparx5->fdma_irq = -ENXIO; + } else { + sparx5->fdma_irq = -ENXIO; + } if (err && sparx5->xtr_irq >= 0) { err = devm_request_irq(sparx5->dev, sparx5->xtr_irq, sparx5_xtr_handler, IRQF_SHARED, @@ -766,6 +781,7 @@ static int mchp_sparx5_probe(struct platform_device *pdev) sparx5->base_mac[5] = 0; } + sparx5->fdma_irq = platform_get_irq_byname(sparx5->pdev, "fdma"); sparx5->xtr_irq = platform_get_irq_byname(sparx5->pdev, "xtr"); /* Read chip ID to check CPU interface */ @@ -824,6 +840,11 @@ static int mchp_sparx5_remove(struct platform_device *pdev) disable_irq(sparx5->xtr_irq); sparx5->xtr_irq = -ENXIO; } + if (sparx5->fdma_irq) { + disable_irq(sparx5->fdma_irq); + sparx5->fdma_irq = -ENXIO; + } + sparx5_fdma_stop(sparx5); sparx5_cleanup_ports(sparx5); /* Unregister netdevs */ sparx5_unregister_notifier_blocks(sparx5); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h index 4d5f44c3a421..a1acc9b461f2 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.h @@ -73,8 +73,61 @@ enum sparx5_vlan_port_type { #define XTR_QUEUE 0 #define INJ_QUEUE 0 +#define FDMA_DCB_MAX 64 +#define FDMA_RX_DCB_MAX_DBS 15 +#define FDMA_TX_DCB_MAX_DBS 1 + struct sparx5; +struct sparx5_db_hw { + u64 dataptr; + u64 status; +}; + +struct sparx5_rx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_RX_DCB_MAX_DBS]; +}; + +struct sparx5_tx_dcb_hw { + u64 nextptr; + u64 info; + struct sparx5_db_hw db[FDMA_TX_DCB_MAX_DBS]; +}; + +/* Frame DMA receive state: + * For each DB, there is a SKB, and the skb data pointer is mapped in + * the DB. Once a frame is received the skb is given to the upper layers + * and a new skb is added to the dcb. + * When the db_index reached FDMA_RX_DCB_MAX_DBS the DB is reused. + */ +struct sparx5_rx { + struct sparx5_rx_dcb_hw *dcb_entries; + struct sparx5_rx_dcb_hw *last_entry; + struct sk_buff *skb[FDMA_DCB_MAX][FDMA_RX_DCB_MAX_DBS]; + int db_index; + int dcb_index; + dma_addr_t dma; + struct napi_struct napi; + u32 channel_id; + struct net_device *ndev; + u64 packets; +}; + +/* Frame DMA transmit state: + * DCBs are chained using the DCBs nextptr field. 
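The sparx5_start() hunk above prefers Frame DMA when an "fdma" interrupt was found and the chip revision is above 0, and otherwise falls back to the register-based injection/extraction interrupt, recording the outcome by poisoning fdma_irq with -ENXIO. A simplified stand-alone model of that decision (the struct and function names here are invented for the sketch):

#include <stdio.h>

#define ENXIO 6

struct sw_state {
        int fdma_irq;   /* negative when the "fdma" interrupt is absent */
        int xtr_irq;    /* register-based INJ/XTR interrupt */
        int chip_rev;
};

static int pick_datapath(struct sw_state *s)
{
        int err = -ENXIO;

        if (s->fdma_irq >= 0 && s->chip_rev > 0)
                err = 0;                /* request fdma irq + sparx5_fdma_start() */
        if (err)
                s->fdma_irq = -ENXIO;   /* remember that FDMA is unusable */
        if (err && s->xtr_irq >= 0)
                err = 0;                /* request xtr irq + manual injection mode */
        return err;
}

int main(void)
{
        struct sw_state rev0 = { .fdma_irq = 10, .xtr_irq = 11, .chip_rev = 0 };
        struct sw_state rev1 = { .fdma_irq = 10, .xtr_irq = 11, .chip_rev = 1 };

        printf("rev0 -> %d (fdma_irq=%d), rev1 -> %d (fdma_irq=%d)\n",
               pick_datapath(&rev0), rev0.fdma_irq,
               pick_datapath(&rev1), rev1.fdma_irq);
        return 0;
}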
+ */ +struct sparx5_tx { + struct sparx5_tx_dcb_hw *curr_entry; + struct sparx5_tx_dcb_hw *first_entry; + struct list_head db_list; + dma_addr_t dma; + u32 channel_id; + u64 packets; + u64 dropped; +}; + struct sparx5_port_config { phy_interface_t portmode; u32 bandwidth; @@ -167,6 +220,10 @@ struct sparx5 { bool sd_sgpio_remapping; /* Register based inj/xtr */ int xtr_irq; + /* Frame DMA */ + int fdma_irq; + struct sparx5_rx rx; + struct sparx5_tx tx; }; /* sparx5_switchdev.c */ @@ -174,11 +231,23 @@ int sparx5_register_notifier_blocks(struct sparx5 *sparx5); void sparx5_unregister_notifier_blocks(struct sparx5 *sparx5); /* sparx5_packet.c */ +struct frame_info { + int src_port; +}; + +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp); +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info); irqreturn_t sparx5_xtr_handler(int irq, void *_priv); int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev); int sparx5_manual_injection_mode(struct sparx5 *sparx5); void sparx5_port_inj_timer_setup(struct sparx5_port *port); +/* sparx5_fdma.c */ +int sparx5_fdma_start(struct sparx5 *sparx5); +int sparx5_fdma_stop(struct sparx5 *sparx5); +int sparx5_fdma_xmit(struct sparx5 *sparx5, u32 *ifh, struct sk_buff *skb); +irqreturn_t sparx5_fdma_handler(int irq, void *args); + /* sparx5_mactable.c */ void sparx5_mact_pull_work(struct work_struct *work); int sparx5_mact_learn(struct sparx5 *sparx5, int port, diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c index 09ca7a3bafdc..dc7e5ea6ec15 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_packet.c @@ -20,11 +20,7 @@ #define INJ_TIMEOUT_NS 50000 -struct frame_info { - int src_port; -}; - -static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) +void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) { /* Start flush */ spx5_wr(QS_XTR_FLUSH_FLUSH_SET(BIT(grp)), sparx5, QS_XTR_FLUSH); @@ -36,7 +32,7 @@ static void sparx5_xtr_flush(struct sparx5 *sparx5, u8 grp) spx5_wr(0, sparx5, QS_XTR_FLUSH); } -static void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) +void sparx5_ifh_parse(u32 *ifh, struct frame_info *info) { u8 *xtr_hdr = (u8 *)ifh; @@ -224,7 +220,10 @@ int sparx5_port_xmit_impl(struct sk_buff *skb, struct net_device *dev) struct sparx5 *sparx5 = port->sparx5; int ret; - ret = sparx5_inject(sparx5, port->ifh, skb, dev); + if (sparx5->fdma_irq > 0) + ret = sparx5_fdma_xmit(sparx5, port->ifh, skb); + else + ret = sparx5_inject(sparx5, port->ifh, skb, dev); if (ret == NETDEV_TX_OK) { stats->tx_bytes += skb->len; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c index d2e3250928bf..189a6a0a2e08 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.c @@ -596,7 +596,7 @@ static int sparx5_port_max_tags_set(struct sparx5 *sparx5, return 0; } -static int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed) +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed) { u32 clk_period_ps = 1600; /* 625Mhz for now */ u32 urg = 672000; diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h index fd05ab6436d1..2f8043eac71b 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_port.h +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_port.h @@ -89,5 +89,6 @@ int sparx5_get_port_status(struct sparx5 
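The sparx5_rx comment above describes the receive bookkeeping: db_index walks the data buffers inside the current DCB and, once it reaches FDMA_RX_DCB_MAX_DBS, the driver moves on and the descriptor is reused. The consuming side lives in the NAPI poll path outside this hunk, so the following is only a hedged sketch of how such a dcb_index/db_index pair can advance and wrap.

#include <stdio.h>

#define FDMA_DCB_MAX        64
#define FDMA_RX_DCB_MAX_DBS 15

struct rx_pos { int dcb_index; int db_index; };

static void rx_advance(struct rx_pos *p)
{
        if (++p->db_index >= FDMA_RX_DCB_MAX_DBS) {
                p->db_index = 0;
                if (++p->dcb_index >= FDMA_DCB_MAX)
                        p->dcb_index = 0;       /* ring wraps around */
        }
}

int main(void)
{
        struct rx_pos pos = { 0, 0 };

        for (int i = 0; i < FDMA_RX_DCB_MAX_DBS + 1; i++)
                rx_advance(&pos);
        printf("dcb=%d db=%d\n", pos.dcb_index, pos.db_index); /* dcb=1 db=1 */
        return 0;
}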
*sparx5, struct sparx5_port_status *status); void sparx5_port_enable(struct sparx5_port *port, bool enable); +int sparx5_port_fwd_urg(struct sparx5 *sparx5, u32 speed); #endif /* __SPARX5_PORT_H__ */ diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c index a72e3b3b596e..649ca609884a 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_switchdev.c @@ -93,9 +93,12 @@ static int sparx5_port_attr_set(struct net_device *dev, const void *ctx, } static int sparx5_port_bridge_join(struct sparx5_port *port, - struct net_device *bridge) + struct net_device *bridge, + struct netlink_ext_ack *extack) { struct sparx5 *sparx5 = port->sparx5; + struct net_device *ndev = port->ndev; + int err; if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) /* First bridged port */ @@ -109,12 +112,21 @@ static int sparx5_port_bridge_join(struct sparx5_port *port, set_bit(port->portno, sparx5->bridge_mask); + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + goto err_switchdev_offload; + /* Port enters in bridge mode therefor don't need to copy to CPU * frames for multicast in case the bridge is not requesting them */ - __dev_mc_unsync(port->ndev, sparx5_mc_unsync); + __dev_mc_unsync(ndev, sparx5_mc_unsync); return 0; + +err_switchdev_offload: + clear_bit(port->portno, sparx5->bridge_mask); + return err; } static void sparx5_port_bridge_leave(struct sparx5_port *port, @@ -122,6 +134,8 @@ static void sparx5_port_bridge_leave(struct sparx5_port *port, { struct sparx5 *sparx5 = port->sparx5; + switchdev_bridge_port_unoffload(port->ndev, NULL, NULL, NULL); + clear_bit(port->portno, sparx5->bridge_mask); if (bitmap_empty(sparx5->bridge_mask, SPX5_PORTS)) sparx5->hw_bridge_dev = NULL; @@ -139,11 +153,15 @@ static int sparx5_port_changeupper(struct net_device *dev, struct netdev_notifier_changeupper_info *info) { struct sparx5_port *port = netdev_priv(dev); + struct netlink_ext_ack *extack; int err = 0; + extack = netdev_notifier_info_to_extack(&info->info); + if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - err = sparx5_port_bridge_join(port, info->upper_dev); + err = sparx5_port_bridge_join(port, info->upper_dev, + extack); else sparx5_port_bridge_leave(port, info->upper_dev); diff --git a/drivers/net/ethernet/microsoft/mana/gdma.h b/drivers/net/ethernet/microsoft/mana/gdma.h index 33e53d32e891..41ecd156e95f 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma.h +++ b/drivers/net/ethernet/microsoft/mana/gdma.h @@ -239,10 +239,8 @@ struct gdma_event { struct gdma_queue; -#define CQE_POLLING_BUFFER 512 struct mana_eq { struct gdma_queue *eq; - struct gdma_comp cqe_poll[CQE_POLLING_BUFFER]; }; typedef void gdma_eq_callback(void *context, struct gdma_queue *q, @@ -291,11 +289,6 @@ struct gdma_queue { unsigned int msix_index; u32 log2_throttle_limit; - - /* NAPI data */ - struct napi_struct napi; - int work_done; - int budget; } eq; struct { @@ -319,9 +312,6 @@ struct gdma_queue_spec { void *context; unsigned long log2_throttle_limit; - - /* Only used by the MANA device. 
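sparx5_port_bridge_join() above follows the usual acquire-then-unwind idiom: the port is first added to the bridge mask, and if switchdev_bridge_port_offload() then fails, the goto label rolls the mask back before returning the error. A compact stand-alone rendering of that ordering, with the offload call replaced by a stub that always fails:

#include <stdio.h>

static unsigned long long bridge_mask;          /* stand-in for sparx5->bridge_mask */

static int fake_bridge_port_offload(int port)   /* stub: pretend the offload fails */
{
        (void)port;
        return -1;
}

static int bridge_join(int port)
{
        int err;

        bridge_mask |= 1ULL << port;            /* set_bit(port, bridge_mask) */

        err = fake_bridge_port_offload(port);
        if (err)
                goto err_switchdev_offload;

        return 0;

err_switchdev_offload:
        bridge_mask &= ~(1ULL << port);         /* clear_bit(): undo the membership */
        return err;
}

int main(void)
{
        printf("join -> %d, mask after rollback = %#llx\n",
               bridge_join(3), bridge_mask);
        return 0;
}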
*/ - struct net_device *ndev; } eq; struct { @@ -406,7 +396,7 @@ void mana_gd_destroy_queue(struct gdma_context *gc, struct gdma_queue *queue); int mana_gd_poll_cq(struct gdma_queue *cq, struct gdma_comp *comp, int num_cqe); -void mana_gd_arm_cq(struct gdma_queue *cq); +void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit); struct gdma_wqe { u32 reserved :24; @@ -496,16 +486,28 @@ enum { GDMA_PROTOCOL_LAST = GDMA_PROTOCOL_V1, }; +#define GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT BIT(0) + +#define GDMA_DRV_CAP_FLAGS1 GDMA_DRV_CAP_FLAG_1_EQ_SHARING_MULTI_VPORT + +#define GDMA_DRV_CAP_FLAGS2 0 + +#define GDMA_DRV_CAP_FLAGS3 0 + +#define GDMA_DRV_CAP_FLAGS4 0 + struct gdma_verify_ver_req { struct gdma_req_hdr hdr; /* Mandatory fields required for protocol establishment */ u64 protocol_ver_min; u64 protocol_ver_max; - u64 drv_cap_flags1; - u64 drv_cap_flags2; - u64 drv_cap_flags3; - u64 drv_cap_flags4; + + /* Gdma Driver Capability Flags */ + u64 gd_drv_cap_flags1; + u64 gd_drv_cap_flags2; + u64 gd_drv_cap_flags3; + u64 gd_drv_cap_flags4; /* Advisory fields */ u64 drv_ver; diff --git a/drivers/net/ethernet/microsoft/mana/gdma_main.c b/drivers/net/ethernet/microsoft/mana/gdma_main.c index 2f87bf90f8ec..cee75b561f59 100644 --- a/drivers/net/ethernet/microsoft/mana/gdma_main.c +++ b/drivers/net/ethernet/microsoft/mana/gdma_main.c @@ -67,6 +67,10 @@ static int mana_gd_query_max_resources(struct pci_dev *pdev) if (gc->max_num_queues > resp.max_rq) gc->max_num_queues = resp.max_rq; + /* The Hardware Channel (HWC) used 1 MSI-X */ + if (gc->max_num_queues > gc->num_msix_usable - 1) + gc->max_num_queues = gc->num_msix_usable - 1; + return 0; } @@ -267,7 +271,7 @@ void mana_gd_wq_ring_doorbell(struct gdma_context *gc, struct gdma_queue *queue) queue->id, queue->head * GDMA_WQE_BU_SIZE, 1); } -void mana_gd_arm_cq(struct gdma_queue *cq) +void mana_gd_ring_cq(struct gdma_queue *cq, u8 arm_bit) { struct gdma_context *gc = cq->gdma_dev->gdma_context; @@ -276,7 +280,7 @@ void mana_gd_arm_cq(struct gdma_queue *cq) u32 head = cq->head % (num_cqe << GDMA_CQE_OWNER_BITS); mana_gd_ring_doorbell(gc, cq->gdma_dev->doorbell, cq->type, cq->id, - head, SET_ARM_BIT); + head, arm_bit); } static void mana_gd_process_eqe(struct gdma_queue *eq) @@ -339,7 +343,6 @@ static void mana_gd_process_eq_events(void *arg) struct gdma_queue *eq = arg; struct gdma_context *gc; struct gdma_eqe *eqe; - unsigned int arm_bit; u32 head, num_eqe; int i; @@ -370,92 +373,54 @@ static void mana_gd_process_eq_events(void *arg) eq->head++; } - /* Always rearm the EQ for HWC. For MANA, rearm it when NAPI is done. 
*/ - if (mana_gd_is_hwc(eq->gdma_dev)) { - arm_bit = SET_ARM_BIT; - } else if (eq->eq.work_done < eq->eq.budget && - napi_complete_done(&eq->eq.napi, eq->eq.work_done)) { - arm_bit = SET_ARM_BIT; - } else { - arm_bit = 0; - } - head = eq->head % (num_eqe << GDMA_EQE_OWNER_BITS); mana_gd_ring_doorbell(gc, eq->gdma_dev->doorbell, eq->type, eq->id, - head, arm_bit); -} - -static int mana_poll(struct napi_struct *napi, int budget) -{ - struct gdma_queue *eq = container_of(napi, struct gdma_queue, eq.napi); - - eq->eq.work_done = 0; - eq->eq.budget = budget; - - mana_gd_process_eq_events(eq); - - return min(eq->eq.work_done, budget); -} - -static void mana_gd_schedule_napi(void *arg) -{ - struct gdma_queue *eq = arg; - struct napi_struct *napi; - - napi = &eq->eq.napi; - napi_schedule_irqoff(napi); + head, SET_ARM_BIT); } static int mana_gd_register_irq(struct gdma_queue *queue, const struct gdma_queue_spec *spec) { struct gdma_dev *gd = queue->gdma_dev; - bool is_mana = mana_gd_is_mana(gd); struct gdma_irq_context *gic; struct gdma_context *gc; struct gdma_resource *r; unsigned int msi_index; unsigned long flags; - int err; + struct device *dev; + int err = 0; gc = gd->gdma_context; r = &gc->msix_resource; + dev = gc->dev; spin_lock_irqsave(&r->lock, flags); msi_index = find_first_zero_bit(r->map, r->size); - if (msi_index >= r->size) { + if (msi_index >= r->size || msi_index >= gc->num_msix_usable) { err = -ENOSPC; } else { bitmap_set(r->map, msi_index, 1); queue->eq.msix_index = msi_index; - err = 0; } spin_unlock_irqrestore(&r->lock, flags); - if (err) - return err; + if (err) { + dev_err(dev, "Register IRQ err:%d, msi:%u rsize:%u, nMSI:%u", + err, msi_index, r->size, gc->num_msix_usable); - WARN_ON(msi_index >= gc->num_msix_usable); + return err; + } gic = &gc->irq_contexts[msi_index]; - if (is_mana) { - netif_napi_add(spec->eq.ndev, &queue->eq.napi, mana_poll, - NAPI_POLL_WEIGHT); - napi_enable(&queue->eq.napi); - } - WARN_ON(gic->handler || gic->arg); gic->arg = queue; - if (is_mana) - gic->handler = mana_gd_schedule_napi; - else - gic->handler = mana_gd_process_eq_events; + gic->handler = mana_gd_process_eq_events; return 0; } @@ -549,11 +514,6 @@ static void mana_gd_destroy_eq(struct gdma_context *gc, bool flush_evenets, mana_gd_deregiser_irq(queue); - if (mana_gd_is_mana(queue->gdma_dev)) { - napi_disable(&queue->eq.napi); - netif_napi_del(&queue->eq.napi); - } - if (queue->eq.disable_needed) mana_gd_disable_queue(queue); } @@ -883,6 +843,11 @@ int mana_gd_verify_vf_version(struct pci_dev *pdev) req.protocol_ver_min = GDMA_PROTOCOL_FIRST; req.protocol_ver_max = GDMA_PROTOCOL_LAST; + req.gd_drv_cap_flags1 = GDMA_DRV_CAP_FLAGS1; + req.gd_drv_cap_flags2 = GDMA_DRV_CAP_FLAGS2; + req.gd_drv_cap_flags3 = GDMA_DRV_CAP_FLAGS3; + req.gd_drv_cap_flags4 = GDMA_DRV_CAP_FLAGS4; + err = mana_gd_send_request(gc, sizeof(req), &req, sizeof(resp), &resp); if (err || resp.hdr.status) { dev_err(gc->dev, "VfVerifyVersionOutput: %d, status=0x%x\n", @@ -1128,7 +1093,7 @@ static int mana_gd_read_cqe(struct gdma_queue *cq, struct gdma_comp *comp) new_bits = (cq->head / num_cqe) & GDMA_CQE_OWNER_MASK; /* Return -1 if overflow detected. 
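mana_gd_register_irq() above now also bounds the chosen MSI-X slot by gc->num_msix_usable, not just by the size of the resource map, and fails with -ENOSPC when no usable vector is free. A small stand-alone model of that slot bookkeeping (the spinlock around the bitmap is omitted, and claim_msix() is an invented name):

#include <stdio.h>

#define MAP_BITS 64

static unsigned long long msix_map;             /* stand-in for the msix_resource bitmap */

static int claim_msix(unsigned int map_size, unsigned int num_usable)
{
        unsigned int i;

        /* find_first_zero_bit(): first index whose bit is clear */
        for (i = 0; i < MAP_BITS && (msix_map & (1ULL << i)); i++)
                ;
        if (i >= map_size || i >= num_usable)
                return -1;              /* the driver returns -ENOSPC and logs it */
        msix_map |= 1ULL << i;          /* bitmap_set(): mark the slot busy */
        return (int)i;
}

int main(void)
{
        printf("%d %d\n", claim_msix(8, 2), claim_msix(8, 2));  /* 0 then 1 */
        printf("%d\n", claim_msix(8, 2));                       /* -1: only 2 usable */
        return 0;
}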
*/ - if (owner_bits != new_bits) + if (WARN_ON_ONCE(owner_bits != new_bits)) return -1; comp->wq_num = cqe->cqe_info.wq_num; @@ -1201,10 +1166,8 @@ static int mana_gd_setup_irqs(struct pci_dev *pdev) if (max_queues_per_port > MANA_MAX_NUM_QUEUES) max_queues_per_port = MANA_MAX_NUM_QUEUES; - max_irqs = max_queues_per_port * MAX_PORTS_IN_MANA_DEV; - /* Need 1 interrupt for the Hardware communication Channel (HWC) */ - max_irqs++; + max_irqs = max_queues_per_port + 1; nvec = pci_alloc_irq_vectors(pdev, 2, max_irqs, PCI_IRQ_MSIX); if (nvec < 0) @@ -1291,6 +1254,9 @@ static int mana_gd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) int bar = 0; int err; + /* Each port has 2 CQs, each CQ has at most 1 EQE at a time */ + BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE); + err = pci_enable_device(pdev); if (err) return -ENXIO; diff --git a/drivers/net/ethernet/microsoft/mana/hw_channel.c b/drivers/net/ethernet/microsoft/mana/hw_channel.c index 1a923fd99990..c1310ea1c216 100644 --- a/drivers/net/ethernet/microsoft/mana/hw_channel.c +++ b/drivers/net/ethernet/microsoft/mana/hw_channel.c @@ -304,7 +304,7 @@ static void mana_hwc_comp_event(void *ctx, struct gdma_queue *q_self) &comp_data); } - mana_gd_arm_cq(q_self); + mana_gd_ring_cq(q_self, SET_ARM_BIT); } static void mana_hwc_destroy_cq(struct gdma_context *gc, struct hwc_cq *hwc_cq) diff --git a/drivers/net/ethernet/microsoft/mana/mana.h b/drivers/net/ethernet/microsoft/mana/mana.h index a2c3f826f022..fc98a5ba5ed0 100644 --- a/drivers/net/ethernet/microsoft/mana/mana.h +++ b/drivers/net/ethernet/microsoft/mana/mana.h @@ -46,7 +46,7 @@ enum TRI_STATE { #define EQ_SIZE (8 * PAGE_SIZE) #define LOG2_EQ_THROTTLE 3 -#define MAX_PORTS_IN_MANA_DEV 16 +#define MAX_PORTS_IN_MANA_DEV 256 struct mana_stats { u64 packets; @@ -225,6 +225,8 @@ struct mana_tx_comp_oob { struct mana_rxq; +#define CQE_POLLING_BUFFER 512 + struct mana_cq { struct gdma_queue *gdma_cq; @@ -244,8 +246,13 @@ struct mana_cq { */ struct mana_txq *txq; - /* Pointer to a buffer which the CQ handler can copy the CQE's into. */ - struct gdma_comp *gdma_comp_buf; + /* Buffer which the CQ handler can copy the CQE's into. 
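The BUILD_BUG_ON() added to mana_gd_probe() above encodes the sizing argument from its comment: with EQs shared across ports, each port contributes at most two outstanding EQEs (one per CQ), so 2 * MAX_PORTS_IN_MANA_DEV entries must fit in EQ_SIZE (8 pages, per the mana.h context above). The check can be reproduced stand-alone as below; GDMA_EQE_SIZE and PAGE_SIZE are assumed values for illustration, not taken from this diff.

#include <stdio.h>

#define PAGE_SIZE               4096    /* assumption for the sketch */
#define GDMA_EQE_SIZE           16      /* assumption, not taken from this diff */
#define EQ_SIZE                 (8 * PAGE_SIZE)
#define MAX_PORTS_IN_MANA_DEV   256

/* equivalent to BUILD_BUG_ON(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE > EQ_SIZE) */
_Static_assert(2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE <= EQ_SIZE,
               "shared EQ too small for worst-case EQE load");

int main(void)
{
        printf("worst case %d bytes of EQEs, EQ size %d bytes\n",
               2 * MAX_PORTS_IN_MANA_DEV * GDMA_EQE_SIZE, EQ_SIZE);
        return 0;
}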
*/ + struct gdma_comp gdma_comp_buf[CQE_POLLING_BUFFER]; + + /* NAPI data */ + struct napi_struct napi; + int work_done; + int budget; }; #define GDMA_MAX_RQE_SGES 15 @@ -315,6 +322,8 @@ struct mana_context { u16 num_ports; + struct mana_eq *eqs; + struct net_device *ports[MAX_PORTS_IN_MANA_DEV]; }; @@ -324,8 +333,6 @@ struct mana_port_context { u8 mac_addr[ETH_ALEN]; - struct mana_eq *eqs; - enum TRI_STATE rss_state; mana_handle_t default_rxobj; @@ -395,11 +402,11 @@ enum mana_command_code { struct mana_query_device_cfg_req { struct gdma_req_hdr hdr; - /* Driver Capability flags */ - u64 drv_cap_flags1; - u64 drv_cap_flags2; - u64 drv_cap_flags3; - u64 drv_cap_flags4; + /* MANA Nic Driver Capability flags */ + u64 mn_drv_cap_flags1; + u64 mn_drv_cap_flags2; + u64 mn_drv_cap_flags3; + u64 mn_drv_cap_flags4; u32 proto_major_ver; u32 proto_minor_ver; @@ -516,7 +523,7 @@ struct mana_cfg_rx_steer_resp { struct gdma_resp_hdr hdr; }; /* HW DATA */ -#define MANA_MAX_NUM_QUEUES 16 +#define MANA_MAX_NUM_QUEUES 64 #define MANA_SHORT_VPORT_OFFSET_MAX ((1U << 8) - 1) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index fff78900fc8a..1b21030308e5 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -696,66 +696,56 @@ static void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type, resp.hdr.status); } -static void mana_init_cqe_poll_buf(struct gdma_comp *cqe_poll_buf) -{ - int i; - - for (i = 0; i < CQE_POLLING_BUFFER; i++) - memset(&cqe_poll_buf[i], 0, sizeof(struct gdma_comp)); -} - -static void mana_destroy_eq(struct gdma_context *gc, - struct mana_port_context *apc) +static void mana_destroy_eq(struct mana_context *ac) { + struct gdma_context *gc = ac->gdma_dev->gdma_context; struct gdma_queue *eq; int i; - if (!apc->eqs) + if (!ac->eqs) return; - for (i = 0; i < apc->num_queues; i++) { - eq = apc->eqs[i].eq; + for (i = 0; i < gc->max_num_queues; i++) { + eq = ac->eqs[i].eq; if (!eq) continue; mana_gd_destroy_queue(gc, eq); } - kfree(apc->eqs); - apc->eqs = NULL; + kfree(ac->eqs); + ac->eqs = NULL; } -static int mana_create_eq(struct mana_port_context *apc) +static int mana_create_eq(struct mana_context *ac) { - struct gdma_dev *gd = apc->ac->gdma_dev; + struct gdma_dev *gd = ac->gdma_dev; + struct gdma_context *gc = gd->gdma_context; struct gdma_queue_spec spec = {}; int err; int i; - apc->eqs = kcalloc(apc->num_queues, sizeof(struct mana_eq), - GFP_KERNEL); - if (!apc->eqs) + ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), + GFP_KERNEL); + if (!ac->eqs) return -ENOMEM; spec.type = GDMA_EQ; spec.monitor_avl_buf = false; spec.queue_size = EQ_SIZE; spec.eq.callback = NULL; - spec.eq.context = apc->eqs; + spec.eq.context = ac->eqs; spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE; - spec.eq.ndev = apc->ndev; - - for (i = 0; i < apc->num_queues; i++) { - mana_init_cqe_poll_buf(apc->eqs[i].cqe_poll); - err = mana_gd_create_mana_eq(gd, &spec, &apc->eqs[i].eq); + for (i = 0; i < gc->max_num_queues; i++) { + err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq); if (err) goto out; } return 0; out: - mana_destroy_eq(gd->gdma_context, apc); + mana_destroy_eq(ac); return err; } @@ -790,7 +780,6 @@ static void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc) static void mana_poll_tx_cq(struct mana_cq *cq) { - struct gdma_queue *gdma_eq = cq->gdma_cq->cq.parent; struct gdma_comp *completions = cq->gdma_comp_buf; struct gdma_posted_wqe_info *wqe_info; 
unsigned int pkt_transmitted = 0; @@ -812,6 +801,9 @@ static void mana_poll_tx_cq(struct mana_cq *cq) comp_read = mana_gd_poll_cq(cq->gdma_cq, completions, CQE_POLLING_BUFFER); + if (comp_read < 1) + return; + for (i = 0; i < comp_read; i++) { struct mana_tx_comp_oob *cqe_oob; @@ -861,7 +853,7 @@ static void mana_poll_tx_cq(struct mana_cq *cq) mana_unmap_skb(skb, apc); - napi_consume_skb(skb, gdma_eq->eq.budget); + napi_consume_skb(skb, cq->budget); pkt_transmitted++; } @@ -890,6 +882,8 @@ static void mana_poll_tx_cq(struct mana_cq *cq) if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0) WARN_ON_ONCE(1); + + cq->work_done = pkt_transmitted; } static void mana_post_pkt_rxq(struct mana_rxq *rxq) @@ -918,17 +912,13 @@ static void mana_rx_skb(void *buf_va, struct mana_rxcomp_oob *cqe, struct mana_stats *rx_stats = &rxq->stats; struct net_device *ndev = rxq->ndev; uint pkt_len = cqe->ppi[0].pkt_len; - struct mana_port_context *apc; u16 rxq_idx = rxq->rxq_idx; struct napi_struct *napi; - struct gdma_queue *eq; struct sk_buff *skb; u32 hash_value; - apc = netdev_priv(ndev); - eq = apc->eqs[rxq_idx].eq; - eq->eq.work_done++; - napi = &eq->eq.napi; + rxq->rx_cq.work_done++; + napi = &rxq->rx_cq.napi; if (!buf_va) { ++ndev->stats.rx_dropped; @@ -1081,6 +1071,7 @@ static void mana_poll_rx_cq(struct mana_cq *cq) static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) { struct mana_cq *cq = context; + u8 arm_bit; WARN_ON_ONCE(cq->gdma_cq != gdma_queue); @@ -1089,7 +1080,33 @@ static void mana_cq_handler(void *context, struct gdma_queue *gdma_queue) else mana_poll_tx_cq(cq); - mana_gd_arm_cq(gdma_queue); + if (cq->work_done < cq->budget && + napi_complete_done(&cq->napi, cq->work_done)) { + arm_bit = SET_ARM_BIT; + } else { + arm_bit = 0; + } + + mana_gd_ring_cq(gdma_queue, arm_bit); +} + +static int mana_poll(struct napi_struct *napi, int budget) +{ + struct mana_cq *cq = container_of(napi, struct mana_cq, napi); + + cq->work_done = 0; + cq->budget = budget; + + mana_cq_handler(cq, cq->gdma_cq); + + return min(cq->work_done, budget); +} + +static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue) +{ + struct mana_cq *cq = context; + + napi_schedule_irqoff(&cq->napi); } static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq) @@ -1114,12 +1131,18 @@ static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq) static void mana_destroy_txq(struct mana_port_context *apc) { + struct napi_struct *napi; int i; if (!apc->tx_qp) return; for (i = 0; i < apc->num_queues; i++) { + napi = &apc->tx_qp[i].tx_cq.napi; + napi_synchronize(napi); + napi_disable(napi); + netif_napi_del(napi); + mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object); mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq); @@ -1134,7 +1157,8 @@ static void mana_destroy_txq(struct mana_port_context *apc) static int mana_create_txq(struct mana_port_context *apc, struct net_device *net) { - struct gdma_dev *gd = apc->ac->gdma_dev; + struct mana_context *ac = apc->ac; + struct gdma_dev *gd = ac->gdma_dev; struct mana_obj_spec wq_spec; struct mana_obj_spec cq_spec; struct gdma_queue_spec spec; @@ -1186,7 +1210,6 @@ static int mana_create_txq(struct mana_port_context *apc, /* Create SQ's CQ */ cq = &apc->tx_qp[i].tx_cq; - cq->gdma_comp_buf = apc->eqs[i].cqe_poll; cq->type = MANA_CQ_TYPE_TX; cq->txq = txq; @@ -1195,8 +1218,8 @@ static int mana_create_txq(struct mana_port_context *apc, spec.type = GDMA_CQ; spec.monitor_avl_buf = false; spec.queue_size = cq_size; - 
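The rewritten mana_cq_handler()/mana_poll() above move NAPI from the EQ to each CQ: the CQ doorbell is rung with the arm bit only when the poll round finished under budget and napi_complete_done() accepted it; otherwise NAPI keeps polling and the queue stays unarmed. The stand-alone model below captures just that budget/arm decision; struct cq_model and its fields are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

struct cq_model {
        int  pending;           /* completions still queued */
        int  work_done;
        int  budget;
        bool armed;             /* models mana_gd_ring_cq(cq, SET_ARM_BIT) */
};

static void cq_handler(struct cq_model *cq)
{
        int batch = cq->pending < cq->budget ? cq->pending : cq->budget;

        cq->pending  -= batch;
        cq->work_done = batch;

        /* napi_complete_done() only succeeds when we stayed under budget */
        cq->armed = (cq->work_done < cq->budget);
}

static int napi_poll(struct cq_model *cq, int budget)
{
        cq->work_done = 0;
        cq->budget    = budget;
        cq_handler(cq);
        return cq->work_done < budget ? cq->work_done : budget;
}

int main(void)
{
        struct cq_model cq = { .pending = 100 };

        while (!cq.armed)
                printf("polled %d, armed=%d\n", napi_poll(&cq, 64), cq.armed);
        return 0;
}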
spec.cq.callback = mana_cq_handler; - spec.cq.parent_eq = apc->eqs[i].eq; + spec.cq.callback = mana_schedule_napi; + spec.cq.parent_eq = ac->eqs[i].eq; spec.cq.context = cq; err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); if (err) @@ -1237,7 +1260,10 @@ static int mana_create_txq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - mana_gd_arm_cq(cq->gdma_cq); + netif_tx_napi_add(net, &cq->napi, mana_poll, NAPI_POLL_WEIGHT); + napi_enable(&cq->napi); + + mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); } return 0; @@ -1246,21 +1272,6 @@ out: return err; } -static void mana_napi_sync_for_rx(struct mana_rxq *rxq) -{ - struct net_device *ndev = rxq->ndev; - struct mana_port_context *apc; - u16 rxq_idx = rxq->rxq_idx; - struct napi_struct *napi; - struct gdma_queue *eq; - - apc = netdev_priv(ndev); - eq = apc->eqs[rxq_idx].eq; - napi = &eq->eq.napi; - - napi_synchronize(napi); -} - static void mana_destroy_rxq(struct mana_port_context *apc, struct mana_rxq *rxq, bool validate_state) @@ -1268,13 +1279,19 @@ static void mana_destroy_rxq(struct mana_port_context *apc, struct gdma_context *gc = apc->ac->gdma_dev->gdma_context; struct mana_recv_buf_oob *rx_oob; struct device *dev = gc->dev; + struct napi_struct *napi; int i; if (!rxq) return; + napi = &rxq->rx_cq.napi; + if (validate_state) - mana_napi_sync_for_rx(rxq); + napi_synchronize(napi); + + napi_disable(napi); + netif_napi_del(napi); mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj); @@ -1418,7 +1435,6 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, /* Create RQ's CQ */ cq = &rxq->rx_cq; - cq->gdma_comp_buf = eq->cqe_poll; cq->type = MANA_CQ_TYPE_RX; cq->rxq = rxq; @@ -1426,7 +1442,7 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, spec.type = GDMA_CQ; spec.monitor_avl_buf = false; spec.queue_size = cq_size; - spec.cq.callback = mana_cq_handler; + spec.cq.callback = mana_schedule_napi; spec.cq.parent_eq = eq->eq; spec.cq.context = cq; err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq); @@ -1466,7 +1482,10 @@ static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc, gc->cq_table[cq->gdma_id] = cq->gdma_cq; - mana_gd_arm_cq(cq->gdma_cq); + netif_napi_add(ndev, &cq->napi, mana_poll, 1); + napi_enable(&cq->napi); + + mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT); out: if (!err) return rxq; @@ -1484,12 +1503,13 @@ out: static int mana_add_rx_queues(struct mana_port_context *apc, struct net_device *ndev) { + struct mana_context *ac = apc->ac; struct mana_rxq *rxq; int err = 0; int i; for (i = 0; i < apc->num_queues; i++) { - rxq = mana_create_rxq(apc, i, &apc->eqs[i], ndev); + rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev); if (!rxq) { err = -ENOMEM; goto out; @@ -1601,16 +1621,11 @@ reset_apc: int mana_alloc_queues(struct net_device *ndev) { struct mana_port_context *apc = netdev_priv(ndev); - struct gdma_dev *gd = apc->ac->gdma_dev; int err; - err = mana_create_eq(apc); - if (err) - return err; - err = mana_create_vport(apc, ndev); if (err) - goto destroy_eq; + return err; err = netif_set_real_num_tx_queues(ndev, apc->num_queues); if (err) @@ -1636,8 +1651,6 @@ int mana_alloc_queues(struct net_device *ndev) destroy_vport: mana_destroy_vport(apc); -destroy_eq: - mana_destroy_eq(gd->gdma_context, apc); return err; } @@ -1714,8 +1727,6 @@ static int mana_dealloc_queues(struct net_device *ndev) mana_destroy_vport(apc); - mana_destroy_eq(apc->ac->gdma_dev->gdma_context, apc); - return 0; } @@ -1768,7 +1779,7 @@ static int mana_probe_port(struct mana_context 
*ac, int port_idx, apc->ac = ac; apc->ndev = ndev; apc->max_queues = gc->max_num_queues; - apc->num_queues = min_t(uint, gc->max_num_queues, MANA_MAX_NUM_QUEUES); + apc->num_queues = gc->max_num_queues; apc->port_handle = INVALID_MANA_HANDLE; apc->port_idx = port_idx; @@ -1839,6 +1850,10 @@ int mana_probe(struct gdma_dev *gd) ac->num_ports = 1; gd->driver_data = ac; + err = mana_create_eq(ac); + if (err) + goto out; + err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION, &ac->num_ports); if (err) @@ -1888,6 +1903,9 @@ void mana_remove(struct gdma_dev *gd) free_netdev(ndev); } + + mana_destroy_eq(ac); + out: mana_gd_deregister_device(gd); gd->driver_data = NULL; diff --git a/drivers/net/ethernet/mscc/Kconfig b/drivers/net/ethernet/mscc/Kconfig index 2d3157e4d081..b6a73d151dec 100644 --- a/drivers/net/ethernet/mscc/Kconfig +++ b/drivers/net/ethernet/mscc/Kconfig @@ -16,7 +16,7 @@ config MSCC_OCELOT_SWITCH_LIB select NET_DEVLINK select REGMAP_MMIO select PACKING - select PHYLIB + select PHYLINK tristate help This is a hardware support library for Ocelot network switches. It is @@ -24,6 +24,7 @@ config MSCC_OCELOT_SWITCH_LIB config MSCC_OCELOT_SWITCH tristate "Ocelot switch driver" + depends on PTP_1588_CLOCK_OPTIONAL depends on BRIDGE || BRIDGE=n depends on NET_SWITCHDEV depends on HAS_IOMEM diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 2948d731a1c1..c581b955efb3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -222,8 +222,35 @@ static void ocelot_port_set_pvid(struct ocelot *ocelot, int port, ANA_PORT_DROP_CFG, port); } +static int ocelot_vlan_member_set(struct ocelot *ocelot, u32 vlan_mask, u16 vid) +{ + int err; + + err = ocelot_vlant_set_mask(ocelot, vid, vlan_mask); + if (err) + return err; + + ocelot->vlan_mask[vid] = vlan_mask; + + return 0; +} + +static int ocelot_vlan_member_add(struct ocelot *ocelot, int port, u16 vid) +{ + return ocelot_vlan_member_set(ocelot, + ocelot->vlan_mask[vid] | BIT(port), + vid); +} + +static int ocelot_vlan_member_del(struct ocelot *ocelot, int port, u16 vid) +{ + return ocelot_vlan_member_set(ocelot, + ocelot->vlan_mask[vid] & ~BIT(port), + vid); +} + int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, - bool vlan_aware) + bool vlan_aware, struct netlink_ext_ack *extack) { struct ocelot_vcap_block *block = &ocelot->block[VCAP_IS1]; struct ocelot_port *ocelot_port = ocelot->ports[port]; @@ -233,8 +260,8 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, list_for_each_entry(filter, &block->rules, list) { if (filter->ingress_port_mask & BIT(port) && filter->action.vid_replace_ena) { - dev_err(ocelot->dev, - "Cannot change VLAN state with vlan modify rules active\n"); + NL_SET_ERR_MSG_MOD(extack, + "Cannot change VLAN state with vlan modify rules active"); return -EBUSY; } } @@ -259,16 +286,15 @@ int ocelot_port_vlan_filtering(struct ocelot *ocelot, int port, EXPORT_SYMBOL(ocelot_port_vlan_filtering); int ocelot_vlan_prepare(struct ocelot *ocelot, int port, u16 vid, bool pvid, - bool untagged) + bool untagged, struct netlink_ext_ack *extack) { struct ocelot_port *ocelot_port = ocelot->ports[port]; /* Deny changing the native VLAN, but always permit deleting it */ if (untagged && ocelot_port->native_vlan.vid != vid && ocelot_port->native_vlan.valid) { - dev_err(ocelot->dev, - "Port already has a native VLAN: %d\n", - ocelot_port->native_vlan.vid); + NL_SET_ERR_MSG_MOD(extack, + "Port already has a native 
VLAN"); return -EBUSY; } @@ -279,13 +305,11 @@ EXPORT_SYMBOL(ocelot_vlan_prepare); int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid, bool untagged) { - int ret; + int err; - /* Make the port a member of the VLAN */ - ocelot->vlan_mask[vid] |= BIT(port); - ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); - if (ret) - return ret; + err = ocelot_vlan_member_add(ocelot, port, vid); + if (err) + return err; /* Default ingress vlan classification */ if (pvid) { @@ -312,13 +336,11 @@ EXPORT_SYMBOL(ocelot_vlan_add); int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - int ret; + int err; - /* Stop the port from being a member of the vlan */ - ocelot->vlan_mask[vid] &= ~BIT(port); - ret = ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); - if (ret) - return ret; + err = ocelot_vlan_member_del(ocelot, port, vid); + if (err) + return err; /* Ingress */ if (ocelot_port->pvid_vlan.vid == vid) { @@ -340,6 +362,7 @@ EXPORT_SYMBOL(ocelot_vlan_del); static void ocelot_vlan_init(struct ocelot *ocelot) { + unsigned long all_ports = GENMASK(ocelot->num_phys_ports - 1, 0); u16 port, vid; /* Clear VLAN table, by default all ports are members of all VLANs */ @@ -348,23 +371,19 @@ static void ocelot_vlan_init(struct ocelot *ocelot) ocelot_vlant_wait_for_completion(ocelot); /* Configure the port VLAN memberships */ - for (vid = 1; vid < VLAN_N_VID; vid++) { - ocelot->vlan_mask[vid] = 0; - ocelot_vlant_set_mask(ocelot, vid, ocelot->vlan_mask[vid]); - } + for (vid = 1; vid < VLAN_N_VID; vid++) + ocelot_vlan_member_set(ocelot, 0, vid); /* Because VLAN filtering is enabled, we need VID 0 to get untagged * traffic. It is added automatically if 8021q module is loaded, but * we can't rely on it since module may be not loaded. */ - ocelot->vlan_mask[0] = GENMASK(ocelot->num_phys_ports - 1, 0); - ocelot_vlant_set_mask(ocelot, 0, ocelot->vlan_mask[0]); + ocelot_vlan_member_set(ocelot, all_ports, 0); /* Set vlan ingress filter mask to all ports but the CPU port by * default. */ - ocelot_write(ocelot, GENMASK(ocelot->num_phys_ports - 1, 0), - ANA_VLANMASK); + ocelot_write(ocelot, all_ports, ANA_VLANMASK); for (port = 0; port < ocelot->num_phys_ports; port++) { ocelot_write_gix(ocelot, 0, REW_PORT_VLAN_CFG, port); @@ -377,7 +396,7 @@ static u32 ocelot_read_eq_avail(struct ocelot *ocelot, int port) return ocelot_read_rix(ocelot, QSYS_SW_STATUS, port); } -int ocelot_port_flush(struct ocelot *ocelot, int port) +static int ocelot_port_flush(struct ocelot *ocelot, int port) { unsigned int pause_ena; int err, val; @@ -429,63 +448,118 @@ int ocelot_port_flush(struct ocelot *ocelot, int port) return err; } -EXPORT_SYMBOL(ocelot_port_flush); -void ocelot_adjust_link(struct ocelot *ocelot, int port, - struct phy_device *phydev) +void ocelot_phylink_mac_link_down(struct ocelot *ocelot, int port, + unsigned int link_an_mode, + phy_interface_t interface, + unsigned long quirks) { struct ocelot_port *ocelot_port = ocelot->ports[port]; - int speed, mode = 0; + int err; + + ocelot_port_rmwl(ocelot_port, 0, DEV_MAC_ENA_CFG_RX_ENA, + DEV_MAC_ENA_CFG); + + ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); + + err = ocelot_port_flush(ocelot, port); + if (err) + dev_err(ocelot->dev, "failed to flush port %d: %d\n", + port, err); + + /* Put the port in reset. 
*/ + if (interface != PHY_INTERFACE_MODE_QSGMII || + !(quirks & OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP)) + ocelot_port_rmwl(ocelot_port, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_TX_RST, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_TX_RST, + DEV_CLOCK_CFG); +} +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_down); + +void ocelot_phylink_mac_link_up(struct ocelot *ocelot, int port, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause, + unsigned long quirks) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + int mac_speed, mode = 0; + u32 mac_fc_cfg; + + /* The MAC might be integrated in systems where the MAC speed is fixed + * and it's the PCS who is performing the rate adaptation, so we have + * to write "1000Mbps" into the LINK_SPEED field of DEV_CLOCK_CFG + * (which is also its default value). + */ + if ((quirks & OCELOT_QUIRK_PCS_PERFORMS_RATE_ADAPTATION) || + speed == SPEED_1000) { + mac_speed = OCELOT_SPEED_1000; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + } else if (speed == SPEED_2500) { + mac_speed = OCELOT_SPEED_2500; + mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + } else if (speed == SPEED_100) { + mac_speed = OCELOT_SPEED_100; + } else { + mac_speed = OCELOT_SPEED_10; + } - switch (phydev->speed) { + if (duplex == DUPLEX_FULL) + mode |= DEV_MAC_MODE_CFG_FDX_ENA; + + ocelot_port_writel(ocelot_port, mode, DEV_MAC_MODE_CFG); + + /* Take port out of reset by clearing the MAC_TX_RST, MAC_RX_RST and + * PORT_RST bits in DEV_CLOCK_CFG. + */ + ocelot_port_writel(ocelot_port, DEV_CLOCK_CFG_LINK_SPEED(mac_speed), + DEV_CLOCK_CFG); + + switch (speed) { case SPEED_10: - speed = OCELOT_SPEED_10; + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_10); break; case SPEED_100: - speed = OCELOT_SPEED_100; + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_100); break; case SPEED_1000: - speed = OCELOT_SPEED_1000; - mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; - break; case SPEED_2500: - speed = OCELOT_SPEED_2500; - mode = DEV_MAC_MODE_CFG_GIGA_MODE_ENA; + mac_fc_cfg = SYS_MAC_FC_CFG_FC_LINK_SPEED(OCELOT_SPEED_1000); break; default: - dev_err(ocelot->dev, "Unsupported PHY speed on port %d: %d\n", - port, phydev->speed); + dev_err(ocelot->dev, "Unsupported speed on port %d: %d\n", + port, speed); return; } - phy_print_status(phydev); - - if (!phydev->link) - return; - - /* Only full duplex supported for now */ - ocelot_port_writel(ocelot_port, DEV_MAC_MODE_CFG_FDX_ENA | - mode, DEV_MAC_MODE_CFG); - - /* Disable HDX fast control */ - ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, - DEV_PORT_MISC); + /* Handle RX pause in all cases, with 2500base-X this is used for rate + * adaptation. + */ + mac_fc_cfg |= SYS_MAC_FC_CFG_RX_FC_ENA; - /* SGMII only for now */ - ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, - PCS1G_MODE_CFG); - ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + if (tx_pause) + mac_fc_cfg |= SYS_MAC_FC_CFG_TX_FC_ENA | + SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | + SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | + SYS_MAC_FC_CFG_ZERO_PAUSE_ENA; - /* Enable PCS */ - ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + /* Flow control. Link speed is only used here to evaluate the time + * specification in incoming pause frames. 
+ */ + ocelot_write_rix(ocelot, mac_fc_cfg, SYS_MAC_FC_CFG, port); - /* No aneg on SGMII */ - ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); - /* No loopback */ - ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG); + ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, tx_pause); - /* Enable MAC module */ + /* Undo the effects of ocelot_phylink_mac_link_down: + * enable MAC module + */ ocelot_port_writel(ocelot_port, DEV_MAC_ENA_CFG_RX_ENA | DEV_MAC_ENA_CFG_TX_ENA, DEV_MAC_ENA_CFG); @@ -502,39 +576,8 @@ void ocelot_adjust_link(struct ocelot *ocelot, int port, /* Core: Enable port for frame transfer */ ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 1); - - /* Flow control */ - ocelot_write_rix(ocelot, SYS_MAC_FC_CFG_PAUSE_VAL_CFG(0xffff) | - SYS_MAC_FC_CFG_RX_FC_ENA | SYS_MAC_FC_CFG_TX_FC_ENA | - SYS_MAC_FC_CFG_ZERO_PAUSE_ENA | - SYS_MAC_FC_CFG_FC_LATENCY_CFG(0x7) | - SYS_MAC_FC_CFG_FC_LINK_SPEED(speed), - SYS_MAC_FC_CFG, port); - ocelot_write_rix(ocelot, 0, ANA_POL_FLOWC, port); -} -EXPORT_SYMBOL(ocelot_adjust_link); - -void ocelot_port_enable(struct ocelot *ocelot, int port, - struct phy_device *phy) -{ - /* Enable receiving frames on the port, and activate auto-learning of - * MAC addresses. - */ - ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | - ANA_PORT_PORT_CFG_RECV_ENA | - ANA_PORT_PORT_CFG_PORTID_VAL(port), - ANA_PORT_PORT_CFG, port); } -EXPORT_SYMBOL(ocelot_port_enable); - -void ocelot_port_disable(struct ocelot *ocelot, int port) -{ - struct ocelot_port *ocelot_port = ocelot->ports[port]; - - ocelot_port_writel(ocelot_port, 0, DEV_MAC_ENA_CFG); - ocelot_fields_write(ocelot, port, QSYS_SWITCH_PORT_MODE_PORT_ENA, 0); -} -EXPORT_SYMBOL(ocelot_port_disable); +EXPORT_SYMBOL_GPL(ocelot_phylink_mac_link_up); static void ocelot_port_add_txtstamp_skb(struct ocelot *ocelot, int port, struct sk_buff *clone) @@ -1957,6 +2000,15 @@ void ocelot_init_port(struct ocelot *ocelot, int port) /* Disable source address learning for standalone mode */ ocelot_port_set_learning(ocelot, port, false); + /* Set the port's initial logical port ID value, enable receiving + * frames on it, and configure the MAC address learning type to + * automatic. 
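ocelot_phylink_mac_link_up() above maps the negotiated speed twice: once to the MAC clock setting (forced to the gigabit value when the PCS performs rate adaptation) and once to the pause-frame timebase, where both 1G and 2.5G reuse the OCELOT_SPEED_1000 encoding. A simplified stand-alone version of those two mappings follows; the OCELOT_SPEED_* numeric values are placeholders here, not taken from this diff.

#include <stdio.h>

enum { SPEED_10 = 10, SPEED_100 = 100, SPEED_1000 = 1000, SPEED_2500 = 2500 };
enum { OCELOT_SPEED_2500, OCELOT_SPEED_1000, OCELOT_SPEED_100, OCELOT_SPEED_10 }; /* placeholder order */

static int mac_clock_speed(int speed, int pcs_rate_adapts)
{
        if (pcs_rate_adapts || speed == SPEED_1000)
                return OCELOT_SPEED_1000;
        if (speed == SPEED_2500)
                return OCELOT_SPEED_2500;
        if (speed == SPEED_100)
                return OCELOT_SPEED_100;
        return OCELOT_SPEED_10;
}

static int fc_timebase_speed(int speed)
{
        switch (speed) {
        case SPEED_10:   return OCELOT_SPEED_10;
        case SPEED_100:  return OCELOT_SPEED_100;
        case SPEED_1000: /* fall through: 2.5G pause timing reuses the 1G value */
        case SPEED_2500: return OCELOT_SPEED_1000;
        default:         return -1;     /* unsupported; the driver logs and bails out */
        }
}

int main(void)
{
        printf("2500: clk=%d fc=%d\n",
               mac_clock_speed(SPEED_2500, 0), fc_timebase_speed(SPEED_2500));
        return 0;
}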
+ */ + ocelot_write_gix(ocelot, ANA_PORT_PORT_CFG_LEARNAUTO | + ANA_PORT_PORT_CFG_RECV_ENA | + ANA_PORT_PORT_CFG_PORTID_VAL(port), + ANA_PORT_PORT_CFG, port); + /* Enable vcap lookups */ ocelot_vcap_enable(ocelot, port); } diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index db6b1a4c3926..1952d6a1b98a 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -12,8 +12,7 @@ #include <linux/etherdevice.h> #include <linux/if_vlan.h> #include <linux/net_tstamp.h> -#include <linux/phy.h> -#include <linux/phy/phy.h> +#include <linux/phylink.h> #include <linux/platform_device.h> #include <linux/regmap.h> @@ -42,11 +41,9 @@ struct ocelot_port_tc { struct ocelot_port_private { struct ocelot_port port; struct net_device *dev; - struct phy_device *phy; + struct phylink *phylink; + struct phylink_config phylink_config; u8 chip_port; - - struct phy *serdes; - struct ocelot_port_tc tc; }; @@ -107,7 +104,7 @@ u32 ocelot_port_readl(struct ocelot_port *port, u32 reg); void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg); int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, - struct phy_device *phy); + struct device_node *portnp); void ocelot_release_port(struct ocelot_port *ocelot_port); int ocelot_devlink_init(struct ocelot *ocelot); void ocelot_devlink_teardown(struct ocelot *ocelot); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index e9d260d84bf3..c0c465a4a981 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -9,10 +9,14 @@ */ #include <linux/if_bridge.h> +#include <linux/of_net.h> +#include <linux/phy/phy.h> #include <net/pkt_cls.h> #include "ocelot.h" #include "ocelot_vcap.h" +#define OCELOT_MAC_QUIRKS OCELOT_QUIRK_QSGMII_PORTS_MUST_BE_UP + static struct ocelot *devlink_port_to_ocelot(struct devlink_port *dlp) { return devlink_priv(dlp->devlink); @@ -160,6 +164,7 @@ int ocelot_port_devlink_init(struct ocelot *ocelot, int port, struct devlink *dl = ocelot->devlink; struct devlink_port_attrs attrs = {}; + memset(dlp, 0, sizeof(*dlp)); memcpy(attrs.switch_id.id, &ocelot->base_mac, id_len); attrs.switch_id.id_len = id_len; attrs.phys.port_number = port; @@ -381,26 +386,6 @@ static int ocelot_setup_tc(struct net_device *dev, enum tc_setup_type type, return 0; } -static void ocelot_port_adjust_link(struct net_device *dev) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - ocelot_adjust_link(ocelot, port, dev->phydev); -} - -static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, - bool untagged) -{ - struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - - return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged); -} - static int ocelot_vlan_vid_add(struct net_device *dev, u16 vid, bool pvid, bool untagged) { @@ -448,33 +433,8 @@ static int ocelot_vlan_vid_del(struct net_device *dev, u16 vid) static int ocelot_port_open(struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot_port *ocelot_port = &priv->port; - struct ocelot *ocelot = ocelot_port->ocelot; - int port = priv->chip_port; - int err; - if (priv->serdes) { - err = phy_set_mode_ext(priv->serdes, PHY_MODE_ETHERNET, - ocelot_port->phy_mode); - if (err) { - netdev_err(dev, 
"Could not set mode of SerDes\n"); - return err; - } - } - - err = phy_connect_direct(dev, priv->phy, &ocelot_port_adjust_link, - ocelot_port->phy_mode); - if (err) { - netdev_err(dev, "Could not attach to PHY\n"); - return err; - } - - dev->phydev = priv->phy; - - phy_attached_info(priv->phy); - phy_start(priv->phy); - - ocelot_port_enable(ocelot, port, priv->phy); + phylink_start(priv->phylink); return 0; } @@ -482,14 +442,8 @@ static int ocelot_port_open(struct net_device *dev) static int ocelot_port_stop(struct net_device *dev) { struct ocelot_port_private *priv = netdev_priv(dev); - struct ocelot *ocelot = priv->port.ocelot; - int port = priv->chip_port; - - phy_disconnect(priv->phy); - - dev->phydev = NULL; - ocelot_port_disable(ocelot, port); + phylink_stop(priv->phylink); return 0; } @@ -823,7 +777,7 @@ static const struct net_device_ops ocelot_port_netdev_ops = { .ndo_vlan_rx_kill_vid = ocelot_vlan_rx_kill_vid, .ndo_set_features = ocelot_set_features, .ndo_setup_tc = ocelot_setup_tc, - .ndo_do_ioctl = ocelot_ioctl, + .ndo_eth_ioctl = ocelot_ioctl, .ndo_get_devlink_port = ocelot_get_devlink_port, }; @@ -959,7 +913,8 @@ static int ocelot_port_attr_set(struct net_device *dev, const void *ctx, ocelot_port_attr_ageing_set(ocelot, port, attr->u.ageing_time); break; case SWITCHDEV_ATTR_ID_BRIDGE_VLAN_FILTERING: - ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering); + ocelot_port_vlan_filtering(ocelot, port, attr->u.vlan_filtering, + extack); break; case SWITCHDEV_ATTR_ID_BRIDGE_MC_DISABLED: ocelot_port_attr_mc_set(ocelot, port, !attr->u.mc_disabled); @@ -979,14 +934,26 @@ static int ocelot_port_attr_set(struct net_device *dev, const void *ctx, return err; } +static int ocelot_vlan_vid_prepare(struct net_device *dev, u16 vid, bool pvid, + bool untagged, struct netlink_ext_ack *extack) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + struct ocelot_port *ocelot_port = &priv->port; + struct ocelot *ocelot = ocelot_port->ocelot; + int port = priv->chip_port; + + return ocelot_vlan_prepare(ocelot, port, vid, pvid, untagged, extack); +} + static int ocelot_port_obj_add_vlan(struct net_device *dev, - const struct switchdev_obj_port_vlan *vlan) + const struct switchdev_obj_port_vlan *vlan, + struct netlink_ext_ack *extack) { bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; int ret; - ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged); + ret = ocelot_vlan_vid_prepare(dev, vlan->vid, pvid, untagged, extack); if (ret) return ret; @@ -1074,7 +1041,8 @@ static int ocelot_port_obj_add(struct net_device *dev, const void *ctx, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: ret = ocelot_port_obj_add_vlan(dev, - SWITCHDEV_OBJ_PORT_VLAN(obj)); + SWITCHDEV_OBJ_PORT_VLAN(obj), + extack); break; case SWITCHDEV_OBJ_ID_PORT_MDB: ret = ocelot_port_obj_add_mdb(dev, SWITCHDEV_OBJ_PORT_MDB(obj)); @@ -1154,45 +1122,27 @@ static int ocelot_switchdev_sync(struct ocelot *ocelot, int port, struct net_device *bridge_dev, struct netlink_ext_ack *extack) { - struct ocelot_port *ocelot_port = ocelot->ports[port]; - struct ocelot_port_private *priv; clock_t ageing_time; u8 stp_state; - int err; - - priv = container_of(ocelot_port, struct ocelot_port_private, port); ocelot_inherit_brport_flags(ocelot, port, brport_dev); stp_state = br_port_get_stp_state(brport_dev); ocelot_bridge_stp_state_set(ocelot, port, stp_state); - err = ocelot_port_vlan_filtering(ocelot, port, - br_vlan_enabled(bridge_dev)); - if (err) - return err; - 
ageing_time = br_get_ageing_time(bridge_dev); ocelot_port_attr_ageing_set(ocelot, port, ageing_time); - err = br_mdb_replay(bridge_dev, brport_dev, priv, true, - &ocelot_switchdev_blocking_nb, extack); - if (err && err != -EOPNOTSUPP) - return err; - - err = br_vlan_replay(bridge_dev, brport_dev, priv, true, - &ocelot_switchdev_blocking_nb, extack); - if (err && err != -EOPNOTSUPP) - return err; - - return 0; + return ocelot_port_vlan_filtering(ocelot, port, + br_vlan_enabled(bridge_dev), + extack); } static int ocelot_switchdev_unsync(struct ocelot *ocelot, int port) { int err; - err = ocelot_port_vlan_filtering(ocelot, port, false); + err = ocelot_port_vlan_filtering(ocelot, port, false, NULL); if (err) return err; @@ -1216,6 +1166,13 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, ocelot_port_bridge_join(ocelot, port, bridge); + err = switchdev_bridge_port_offload(brport_dev, dev, priv, + &ocelot_netdevice_nb, + &ocelot_switchdev_blocking_nb, + false, extack); + if (err) + goto err_switchdev_offload; + err = ocelot_switchdev_sync(ocelot, port, brport_dev, bridge, extack); if (err) goto err_switchdev_sync; @@ -1223,10 +1180,24 @@ static int ocelot_netdevice_bridge_join(struct net_device *dev, return 0; err_switchdev_sync: + switchdev_bridge_port_unoffload(brport_dev, priv, + &ocelot_netdevice_nb, + &ocelot_switchdev_blocking_nb); +err_switchdev_offload: ocelot_port_bridge_leave(ocelot, port, bridge); return err; } +static void ocelot_netdevice_pre_bridge_leave(struct net_device *dev, + struct net_device *brport_dev) +{ + struct ocelot_port_private *priv = netdev_priv(dev); + + switchdev_bridge_port_unoffload(brport_dev, priv, + &ocelot_netdevice_nb, + &ocelot_switchdev_blocking_nb); +} + static int ocelot_netdevice_bridge_leave(struct net_device *dev, struct net_device *brport_dev, struct net_device *bridge) @@ -1279,6 +1250,18 @@ err_bridge_join: return err; } +static void ocelot_netdevice_pre_lag_leave(struct net_device *dev, + struct net_device *bond) +{ + struct net_device *bridge_dev; + + bridge_dev = netdev_master_upper_dev_get(bond); + if (!bridge_dev || !netif_is_bridge_master(bridge_dev)) + return; + + ocelot_netdevice_pre_bridge_leave(dev, bond); +} + static int ocelot_netdevice_lag_leave(struct net_device *dev, struct net_device *bond) { @@ -1356,6 +1339,43 @@ ocelot_netdevice_lag_changeupper(struct net_device *dev, } static int +ocelot_netdevice_prechangeupper(struct net_device *dev, + struct net_device *brport_dev, + struct netdev_notifier_changeupper_info *info) +{ + if (netif_is_bridge_master(info->upper_dev) && !info->linking) + ocelot_netdevice_pre_bridge_leave(dev, brport_dev); + + if (netif_is_lag_master(info->upper_dev) && !info->linking) + ocelot_netdevice_pre_lag_leave(dev, info->upper_dev); + + return NOTIFY_DONE; +} + +static int +ocelot_netdevice_lag_prechangeupper(struct net_device *dev, + struct netdev_notifier_changeupper_info *info) +{ + struct net_device *lower; + struct list_head *iter; + int err = NOTIFY_DONE; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct ocelot_port_private *priv = netdev_priv(lower); + struct ocelot_port *ocelot_port = &priv->port; + + if (ocelot_port->bond != dev) + return NOTIFY_OK; + + err = ocelot_netdevice_prechangeupper(dev, lower, info); + if (err) + return err; + } + + return NOTIFY_DONE; +} + +static int ocelot_netdevice_changelowerstate(struct net_device *dev, struct netdev_lag_lower_state_info *info) { @@ -1382,6 +1402,17 @@ static int ocelot_netdevice_event(struct notifier_block *unused, struct 
net_device *dev = netdev_notifier_info_to_dev(ptr); switch (event) { + case NETDEV_PRECHANGEUPPER: { + struct netdev_notifier_changeupper_info *info = ptr; + + if (ocelot_netdevice_dev_check(dev)) + return ocelot_netdevice_prechangeupper(dev, dev, info); + + if (netif_is_lag_master(dev)) + return ocelot_netdevice_lag_prechangeupper(dev, info); + + break; + } case NETDEV_CHANGEUPPER: { struct netdev_notifier_changeupper_info *info = ptr; @@ -1466,8 +1497,188 @@ struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { .notifier_call = ocelot_switchdev_blocking_event, }; +static void vsc7514_phylink_validate(struct phylink_config *config, + unsigned long *supported, + struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot_port *ocelot_port = &priv->port; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = {}; + + if (state->interface != PHY_INTERFACE_MODE_NA && + state->interface != ocelot_port->phy_mode) { + bitmap_zero(supported, __ETHTOOL_LINK_MODE_MASK_NBITS); + return; + } + + phylink_set_port_modes(mask); + + phylink_set(mask, Pause); + phylink_set(mask, Autoneg); + phylink_set(mask, Asym_Pause); + phylink_set(mask, 10baseT_Half); + phylink_set(mask, 10baseT_Full); + phylink_set(mask, 100baseT_Half); + phylink_set(mask, 100baseT_Full); + phylink_set(mask, 1000baseT_Half); + phylink_set(mask, 1000baseT_Full); + phylink_set(mask, 1000baseX_Full); + phylink_set(mask, 2500baseT_Full); + phylink_set(mask, 2500baseX_Full); + + bitmap_and(supported, supported, mask, __ETHTOOL_LINK_MODE_MASK_NBITS); + bitmap_and(state->advertising, state->advertising, mask, + __ETHTOOL_LINK_MODE_MASK_NBITS); +} + +static void vsc7514_phylink_mac_config(struct phylink_config *config, + unsigned int link_an_mode, + const struct phylink_link_state *state) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot_port *ocelot_port = &priv->port; + + /* Disable HDX fast control */ + ocelot_port_writel(ocelot_port, DEV_PORT_MISC_HDX_FAST_DIS, + DEV_PORT_MISC); + + /* SGMII only for now */ + ocelot_port_writel(ocelot_port, PCS1G_MODE_CFG_SGMII_MODE_ENA, + PCS1G_MODE_CFG); + ocelot_port_writel(ocelot_port, PCS1G_SD_CFG_SD_SEL, PCS1G_SD_CFG); + + /* Enable PCS */ + ocelot_port_writel(ocelot_port, PCS1G_CFG_PCS_ENA, PCS1G_CFG); + + /* No aneg on SGMII */ + ocelot_port_writel(ocelot_port, 0, PCS1G_ANEG_CFG); + + /* No loopback */ + ocelot_port_writel(ocelot_port, 0, PCS1G_LB_CFG); +} + +static void vsc7514_phylink_mac_link_down(struct phylink_config *config, + unsigned int link_an_mode, + phy_interface_t interface) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, + OCELOT_MAC_QUIRKS); +} + +static void vsc7514_phylink_mac_link_up(struct phylink_config *config, + struct phy_device *phydev, + unsigned int link_an_mode, + phy_interface_t interface, + int speed, int duplex, + bool tx_pause, bool rx_pause) +{ + struct net_device *ndev = to_net_dev(config->dev); + struct ocelot_port_private *priv = netdev_priv(ndev); + struct ocelot *ocelot = priv->port.ocelot; + int port = priv->chip_port; + + ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, + interface, speed, duplex, + tx_pause, rx_pause, OCELOT_MAC_QUIRKS); +} + +static const 
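vsc7514_phylink_validate() above reports nothing as supported when phylink queries an interface other than the port's configured phy_mode, and otherwise intersects the advertised modes with the port's fixed capability mask. The real code works on ethtool link-mode bitmaps; the stand-alone sketch below uses plain integers as stand-ins to show the same filtering.

#include <stdint.h>
#include <stdio.h>

#define MODE_NA      0
#define MODE_SGMII   1
#define MODE_QSGMII  2

static uint32_t validate(int requested_if, int port_if,
                         uint32_t supported, uint32_t port_caps)
{
        if (requested_if != MODE_NA && requested_if != port_if)
                return 0;               /* bitmap_zero(supported, ...) */
        return supported & port_caps;   /* bitmap_and(supported, supported, mask, ...) */
}

int main(void)
{
        uint32_t caps = 0x3f;   /* stand-in for the 10/100/1000/2500 modes set above */

        printf("%#x %#x\n",
               validate(MODE_SGMII, MODE_SGMII, 0xff, caps),
               validate(MODE_QSGMII, MODE_SGMII, 0xff, caps));
        return 0;
}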
struct phylink_mac_ops ocelot_phylink_ops = { + .validate = vsc7514_phylink_validate, + .mac_config = vsc7514_phylink_mac_config, + .mac_link_down = vsc7514_phylink_mac_link_down, + .mac_link_up = vsc7514_phylink_mac_link_up, +}; + +static int ocelot_port_phylink_create(struct ocelot *ocelot, int port, + struct device_node *portnp) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + struct ocelot_port_private *priv; + struct device *dev = ocelot->dev; + phy_interface_t phy_mode; + struct phylink *phylink; + int err; + + of_get_phy_mode(portnp, &phy_mode); + /* DT bindings of internal PHY ports are broken and don't + * specify a phy-mode + */ + if (phy_mode == PHY_INTERFACE_MODE_NA) + phy_mode = PHY_INTERFACE_MODE_INTERNAL; + + if (phy_mode != PHY_INTERFACE_MODE_SGMII && + phy_mode != PHY_INTERFACE_MODE_QSGMII && + phy_mode != PHY_INTERFACE_MODE_INTERNAL) { + dev_err(dev, "unsupported phy mode %s for port %d\n", + phy_modes(phy_mode), port); + return -EINVAL; + } + + /* Ensure clock signals and speed are set on all QSGMII links */ + if (phy_mode == PHY_INTERFACE_MODE_QSGMII) + ocelot_port_rmwl(ocelot_port, 0, + DEV_CLOCK_CFG_MAC_TX_RST | + DEV_CLOCK_CFG_MAC_RX_RST, + DEV_CLOCK_CFG); + + ocelot_port->phy_mode = phy_mode; + + if (phy_mode != PHY_INTERFACE_MODE_INTERNAL) { + struct phy *serdes = of_phy_get(portnp, NULL); + + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + dev_err_probe(dev, err, + "missing SerDes phys for port %d\n", + port); + return err; + } + + err = phy_set_mode_ext(serdes, PHY_MODE_ETHERNET, phy_mode); + of_phy_put(serdes); + if (err) { + dev_err(dev, "Could not set SerDes mode on port %d: %pe\n", + port, ERR_PTR(err)); + return err; + } + } + + priv = container_of(ocelot_port, struct ocelot_port_private, port); + + priv->phylink_config.dev = &priv->dev->dev; + priv->phylink_config.type = PHYLINK_NETDEV; + + phylink = phylink_create(&priv->phylink_config, + of_fwnode_handle(portnp), + phy_mode, &ocelot_phylink_ops); + if (IS_ERR(phylink)) { + err = PTR_ERR(phylink); + dev_err(dev, "Could not create phylink (%pe)\n", phylink); + return err; + } + + priv->phylink = phylink; + + err = phylink_of_phy_connect(phylink, portnp, 0); + if (err) { + dev_err(dev, "Could not connect to PHY: %pe\n", ERR_PTR(err)); + phylink_destroy(phylink); + priv->phylink = NULL; + return err; + } + + return 0; +} + int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, - struct phy_device *phy) + struct device_node *portnp) { struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; @@ -1480,7 +1691,6 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, SET_NETDEV_DEV(dev, ocelot->dev); priv = netdev_priv(dev); priv->dev = dev; - priv->phy = phy; priv->chip_port = port; ocelot_port = &priv->port; ocelot_port->ocelot = ocelot; @@ -1501,15 +1711,23 @@ int ocelot_probe_port(struct ocelot *ocelot, int port, struct regmap *target, ocelot_init_port(ocelot, port); + err = ocelot_port_phylink_create(ocelot, port, portnp); + if (err) + goto out; + err = register_netdev(dev); if (err) { dev_err(ocelot->dev, "register_netdev failed\n"); - free_netdev(dev); - ocelot->ports[port] = NULL; - return err; + goto out; } return 0; + +out: + ocelot->ports[port] = NULL; + free_netdev(dev); + + return err; } void ocelot_release_port(struct ocelot_port *ocelot_port) @@ -1519,5 +1737,14 @@ void ocelot_release_port(struct ocelot_port *ocelot_port) port); unregister_netdev(priv->dev); + + if (priv->phylink) { + rtnl_lock(); + 
phylink_disconnect_phy(priv->phylink); + rtnl_unlock(); + + phylink_destroy(priv->phylink); + } + free_netdev(priv->dev); } diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 4bd7e9d9ec61..291ae6817c26 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -9,6 +9,7 @@ #include <linux/module.h> #include <linux/of_net.h> #include <linux/netdevice.h> +#include <linux/phylink.h> #include <linux/of_mdio.h> #include <linux/of_platform.h> #include <linux/mfd/syscon.h> @@ -945,13 +946,9 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, for_each_available_child_of_node(ports, portnp) { struct ocelot_port_private *priv; struct ocelot_port *ocelot_port; - struct device_node *phy_node; struct devlink_port *dlp; - phy_interface_t phy_mode; - struct phy_device *phy; struct regmap *target; struct resource *res; - struct phy *serdes; char res_name[8]; if (of_property_read_u32(portnp, "reg", ®)) @@ -975,77 +972,26 @@ static int mscc_ocelot_init_ports(struct platform_device *pdev, goto out_teardown; } - phy_node = of_parse_phandle(portnp, "phy-handle", 0); - if (!phy_node) - continue; - - phy = of_phy_find_device(phy_node); - of_node_put(phy_node); - if (!phy) - continue; - err = ocelot_port_devlink_init(ocelot, port, DEVLINK_PORT_FLAVOUR_PHYSICAL); if (err) { of_node_put(portnp); goto out_teardown; } - devlink_ports_registered |= BIT(port); - err = ocelot_probe_port(ocelot, port, target, phy); + err = ocelot_probe_port(ocelot, port, target, portnp); if (err) { - of_node_put(portnp); - goto out_teardown; + ocelot_port_devlink_teardown(ocelot, port); + continue; } + devlink_ports_registered |= BIT(port); + ocelot_port = ocelot->ports[port]; priv = container_of(ocelot_port, struct ocelot_port_private, port); dlp = &ocelot->devlink_ports[port]; devlink_port_type_eth_set(dlp, priv->dev); - - of_get_phy_mode(portnp, &phy_mode); - - ocelot_port->phy_mode = phy_mode; - - switch (ocelot_port->phy_mode) { - case PHY_INTERFACE_MODE_NA: - continue; - case PHY_INTERFACE_MODE_SGMII: - break; - case PHY_INTERFACE_MODE_QSGMII: - /* Ensure clock signals and speed is set on all - * QSGMII links - */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_LINK_SPEED - (OCELOT_SPEED_1000), - DEV_CLOCK_CFG); - break; - default: - dev_err(ocelot->dev, - "invalid phy mode for port%d, (Q)SGMII only\n", - port); - of_node_put(portnp); - err = -EINVAL; - goto out_teardown; - } - - serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); - if (IS_ERR(serdes)) { - err = PTR_ERR(serdes); - if (err == -EPROBE_DEFER) - dev_dbg(ocelot->dev, "deferring probe\n"); - else - dev_err(ocelot->dev, - "missing SerDes phys for port%d\n", - port); - - of_node_put(portnp); - goto out_teardown; - } - - priv->serdes = serdes; } /* Initialize unused devlink ports at the end */ @@ -1103,7 +1049,8 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (!np && !pdev->dev.platform_data) return -ENODEV; - devlink = devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot)); + devlink = + devlink_alloc(&ocelot_devlink_ops, sizeof(*ocelot), &pdev->dev); if (!devlink) return -ENOMEM; @@ -1187,7 +1134,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) if (err) goto out_put_ports; - err = devlink_register(devlink, ocelot->dev); + err = devlink_register(devlink); if (err) goto out_ocelot_deinit; diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index 
fc99ad8e4a38..c1a75b08ced7 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -850,9 +850,9 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) dmatest_page = alloc_page(GFP_KERNEL); if (!dmatest_page) return -ENOMEM; - dmatest_bus = pci_map_page(mgp->pdev, dmatest_page, 0, PAGE_SIZE, - DMA_BIDIRECTIONAL); - if (unlikely(pci_dma_mapping_error(mgp->pdev, dmatest_bus))) { + dmatest_bus = dma_map_page(&mgp->pdev->dev, dmatest_page, 0, + PAGE_SIZE, DMA_BIDIRECTIONAL); + if (unlikely(dma_mapping_error(&mgp->pdev->dev, dmatest_bus))) { __free_page(dmatest_page); return -ENOMEM; } @@ -899,7 +899,8 @@ static int myri10ge_dma_test(struct myri10ge_priv *mgp, int test_type) (cmd.data0 & 0xffff); abort: - pci_unmap_page(mgp->pdev, dmatest_bus, PAGE_SIZE, DMA_BIDIRECTIONAL); + dma_unmap_page(&mgp->pdev->dev, dmatest_bus, PAGE_SIZE, + DMA_BIDIRECTIONAL); put_page(dmatest_page); if (status != 0 && test_type != MXGEFW_CMD_UNALIGNED_TEST) @@ -1205,10 +1206,10 @@ myri10ge_alloc_rx_pages(struct myri10ge_priv *mgp, struct myri10ge_rx_buf *rx, return; } - bus = pci_map_page(mgp->pdev, page, 0, + bus = dma_map_page(&mgp->pdev->dev, page, 0, MYRI10GE_ALLOC_SIZE, - PCI_DMA_FROMDEVICE); - if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) { __free_pages(page, MYRI10GE_ALLOC_ORDER); if (rx->fill_cnt - rx->cnt < 16) rx->watchdog_needed = 1; @@ -1256,9 +1257,9 @@ myri10ge_unmap_rx_page(struct pci_dev *pdev, /* unmap the recvd page if we're the only or last user of it */ if (bytes >= MYRI10GE_ALLOC_SIZE / 2 || (info->page_offset + 2 * bytes) > MYRI10GE_ALLOC_SIZE) { - pci_unmap_page(pdev, (dma_unmap_addr(info, bus) - & ~(MYRI10GE_ALLOC_SIZE - 1)), - MYRI10GE_ALLOC_SIZE, PCI_DMA_FROMDEVICE); + dma_unmap_page(&pdev->dev, (dma_unmap_addr(info, bus) + & ~(MYRI10GE_ALLOC_SIZE - 1)), + MYRI10GE_ALLOC_SIZE, DMA_FROM_DEVICE); } } @@ -1398,16 +1399,16 @@ myri10ge_tx_done(struct myri10ge_slice_state *ss, int mcp_index) ss->stats.tx_packets++; dev_consume_skb_irq(skb); if (len) - pci_unmap_single(pdev, + dma_unmap_single(&pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } else { if (len) - pci_unmap_page(pdev, + dma_unmap_page(&pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } @@ -1651,8 +1652,10 @@ myri10ge_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) strlcpy(info->bus_info, pci_name(mgp->pdev), sizeof(info->bus_info)); } -static int -myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) +static int myri10ge_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct myri10ge_priv *mgp = netdev_priv(netdev); @@ -1660,8 +1663,10 @@ myri10ge_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) return 0; } -static int -myri10ge_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *coal) +static int myri10ge_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct myri10ge_priv *mgp = netdev_priv(netdev); @@ -2110,16 +2115,16 @@ static void myri10ge_free_rings(struct myri10ge_slice_state *ss) ss->stats.tx_dropped++; dev_kfree_skb_any(skb); if (len) - pci_unmap_single(mgp->pdev, + 
dma_unmap_single(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } else { if (len) - pci_unmap_page(mgp->pdev, + dma_unmap_page(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); } } kfree(ss->rx_big.info); @@ -2584,15 +2589,15 @@ static void myri10ge_unmap_tx_dma(struct myri10ge_priv *mgp, len = dma_unmap_len(&tx->info[idx], len); if (len) { if (tx->info[idx].skb != NULL) - pci_unmap_single(mgp->pdev, + dma_unmap_single(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); else - pci_unmap_page(mgp->pdev, + dma_unmap_page(&mgp->pdev->dev, dma_unmap_addr(&tx->info[idx], bus), len, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); dma_unmap_len_set(&tx->info[idx], len, 0); tx->info[idx].skb = NULL; } @@ -2715,8 +2720,8 @@ again: /* map the skb for DMA */ len = skb_headlen(skb); - bus = pci_map_single(mgp->pdev, skb->data, len, PCI_DMA_TODEVICE); - if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) + bus = dma_map_single(&mgp->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) goto drop; idx = tx->req & tx->mask; @@ -2824,7 +2829,7 @@ again: len = skb_frag_size(frag); bus = skb_frag_dma_map(&mgp->pdev->dev, frag, 0, len, DMA_TO_DEVICE); - if (unlikely(pci_dma_mapping_error(mgp->pdev, bus))) { + if (unlikely(dma_mapping_error(&mgp->pdev->dev, bus))) { myri10ge_unmap_tx_dma(mgp, tx, idx); goto drop; } @@ -3776,19 +3781,17 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) myri10ge_mask_surprise_down(pdev); pci_set_master(pdev); dac_enabled = 1; - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); + status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (status != 0) { dac_enabled = 0; dev_err(&pdev->dev, - "64-bit pci address mask was refused, " - "trying 32-bit\n"); - status = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + "64-bit pci address mask was refused, trying 32-bit\n"); + status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); } if (status != 0) { dev_err(&pdev->dev, "Error %d setting DMA mask\n", status); goto abort_with_enabled; } - (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), &mgp->cmd_bus, GFP_KERNEL); if (!mgp->cmd) { diff --git a/drivers/net/ethernet/natsemi/jazzsonic.c b/drivers/net/ethernet/natsemi/jazzsonic.c index ce3eca5d152b..d74a80f010c5 100644 --- a/drivers/net/ethernet/natsemi/jazzsonic.c +++ b/drivers/net/ethernet/natsemi/jazzsonic.c @@ -193,8 +193,6 @@ static int jazz_sonic_probe(struct platform_device *pdev) SET_NETDEV_DEV(dev, &pdev->dev); platform_set_drvdata(pdev, dev); - netdev_boot_setup_check(dev); - dev->base_addr = res->start; dev->irq = platform_get_irq(pdev, 0); err = sonic_probe1(dev); diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c index 84f7dbe9edff..3f982033944b 100644 --- a/drivers/net/ethernet/natsemi/natsemi.c +++ b/drivers/net/ethernet/natsemi/natsemi.c @@ -790,7 +790,7 @@ static const struct net_device_ops natsemi_netdev_ops = { .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = natsemi_change_mtu, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = ns_tx_timeout, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/natsemi/xtsonic.c b/drivers/net/ethernet/natsemi/xtsonic.c index 
28d9e98db81a..ca4686094701 100644 --- a/drivers/net/ethernet/natsemi/xtsonic.c +++ b/drivers/net/ethernet/natsemi/xtsonic.c @@ -215,7 +215,6 @@ int xtsonic_probe(struct platform_device *pdev) lp->device = &pdev->dev; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); - netdev_boot_setup_check(dev); dev->base_addr = resmem->start; dev->irq = resirq->start; diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index 0b017d4f5c08..09c0e839cca5 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7625,7 +7625,7 @@ static const struct net_device_ops s2io_netdev_ops = { .ndo_start_xmit = s2io_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = s2io_ndo_set_multicast, - .ndo_do_ioctl = s2io_ioctl, + .ndo_eth_ioctl = s2io_ioctl, .ndo_set_mac_address = s2io_set_mac_addr, .ndo_change_mtu = s2io_change_mtu, .ndo_set_features = s2io_set_features, diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c index 7abd13e69471..df4a3f3da83a 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-main.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c @@ -3339,7 +3339,7 @@ static const struct net_device_ops vxge_netdev_ops = { .ndo_start_xmit = vxge_xmit, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = vxge_set_multicast, - .ndo_do_ioctl = vxge_ioctl, + .ndo_eth_ioctl = vxge_ioctl, .ndo_set_mac_address = vxge_set_mac_addr, .ndo_change_mtu = vxge_change_mtu, .ndo_fix_features = vxge_fix_features, diff --git a/drivers/net/ethernet/netronome/Kconfig b/drivers/net/ethernet/netronome/Kconfig index b82758d5beed..8844d1ac053a 100644 --- a/drivers/net/ethernet/netronome/Kconfig +++ b/drivers/net/ethernet/netronome/Kconfig @@ -23,6 +23,7 @@ config NFP depends on TLS && TLS_DEVICE || TLS_DEVICE=n select NET_DEVLINK select CRC32 + select DIMLIB help This driver supports the Netronome(R) NFP4000/NFP6000 based cards working as a advanced Ethernet NIC. It works with both diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 1cbe2c9f3959..2a432de11858 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -262,10 +262,10 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, } static bool -nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx) +nfp_flower_tun_is_gre(struct flow_rule *rule, int start_idx) { - struct flow_action_entry *act = flow->rule->action.entries; - int num_act = flow->rule->action.num_entries; + struct flow_action_entry *act = rule->action.entries; + int num_act = rule->action.num_entries; int act_idx; /* Preparse action list for next mirred or redirect action */ @@ -279,7 +279,7 @@ nfp_flower_tun_is_gre(struct flow_cls_offload *flow, int start_idx) static enum nfp_flower_tun_type nfp_fl_get_tun_from_act(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, const struct flow_action_entry *act, int act_idx) { const struct ip_tunnel_info *tun = act->tunnel; @@ -288,7 +288,7 @@ nfp_fl_get_tun_from_act(struct nfp_app *app, /* Determine the tunnel type based on the egress netdev * in the mirred action for tunnels without l4. 
*/ - if (nfp_flower_tun_is_gre(flow, act_idx)) + if (nfp_flower_tun_is_gre(rule, act_idx)) return NFP_FL_TUNNEL_GRE; switch (tun->key.tp_dst) { @@ -788,11 +788,10 @@ struct nfp_flower_pedit_acts { }; static int -nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action, +nfp_fl_commit_mangle(struct flow_rule *rule, char *nfp_action, int *a_len, struct nfp_flower_pedit_acts *set_act, u32 *csum_updated) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); size_t act_size = 0; u8 ip_proto = 0; @@ -890,7 +889,7 @@ nfp_fl_commit_mangle(struct flow_cls_offload *flow, char *nfp_action, static int nfp_fl_pedit(const struct flow_action_entry *act, - struct flow_cls_offload *flow, char *nfp_action, int *a_len, + char *nfp_action, int *a_len, u32 *csum_updated, struct nfp_flower_pedit_acts *set_act, struct netlink_ext_ack *extack) { @@ -977,7 +976,7 @@ nfp_flower_output_action(struct nfp_app *app, static int nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct nfp_fl_payload *nfp_fl, int *a_len, struct net_device *netdev, enum nfp_flower_tun_type *tun_type, int *tun_out_cnt, @@ -1045,7 +1044,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, case FLOW_ACTION_TUNNEL_ENCAP: { const struct ip_tunnel_info *ip_tun = act->tunnel; - *tun_type = nfp_fl_get_tun_from_act(app, flow, act, act_idx); + *tun_type = nfp_fl_get_tun_from_act(app, rule, act, act_idx); if (*tun_type == NFP_FL_TUNNEL_NONE) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: unsupported tunnel type in action list"); return -EOPNOTSUPP; @@ -1086,7 +1085,7 @@ nfp_flower_loop_action(struct nfp_app *app, const struct flow_action_entry *act, /* Tunnel decap is handled by default so accept action. 
*/ return 0; case FLOW_ACTION_MANGLE: - if (nfp_fl_pedit(act, flow, &nfp_fl->action_data[*a_len], + if (nfp_fl_pedit(act, &nfp_fl->action_data[*a_len], a_len, csum_updated, set_act, extack)) return -EOPNOTSUPP; break; @@ -1195,7 +1194,7 @@ static bool nfp_fl_check_mangle_end(struct flow_action *flow_act, } int nfp_flower_compile_action(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, struct netlink_ext_ack *extack) @@ -1207,7 +1206,7 @@ int nfp_flower_compile_action(struct nfp_app *app, bool pkt_host = false; u32 csum_updated = 0; - if (!flow_action_hw_stats_check(&flow->rule->action, extack, + if (!flow_action_hw_stats_check(&rule->action, extack, FLOW_ACTION_HW_STATS_DELAYED_BIT)) return -EOPNOTSUPP; @@ -1219,18 +1218,18 @@ int nfp_flower_compile_action(struct nfp_app *app, tun_out_cnt = 0; out_cnt = 0; - flow_action_for_each(i, act, &flow->rule->action) { - if (nfp_fl_check_mangle_start(&flow->rule->action, i)) + flow_action_for_each(i, act, &rule->action) { + if (nfp_fl_check_mangle_start(&rule->action, i)) memset(&set_act, 0, sizeof(set_act)); - err = nfp_flower_loop_action(app, act, flow, nfp_flow, &act_len, + err = nfp_flower_loop_action(app, act, rule, nfp_flow, &act_len, netdev, &tun_type, &tun_out_cnt, &out_cnt, &csum_updated, &set_act, &pkt_host, extack, i); if (err) return err; act_cnt++; - if (nfp_fl_check_mangle_end(&flow->rule->action, i)) - nfp_fl_commit_mangle(flow, + if (nfp_fl_check_mangle_end(&rule->action, i)) + nfp_fl_commit_mangle(rule, &nfp_flow->action_data[act_len], &act_len, &set_act, &csum_updated); } diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 062bb2db68bf..bfd7d1c35076 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -2,6 +2,7 @@ /* Copyright (C) 2021 Corigine, Inc. 
*/ #include "conntrack.h" +#include "../nfp_port.h" const struct rhashtable_params nfp_tc_ct_merge_params = { .head_offset = offsetof(struct nfp_fl_ct_tc_merge, @@ -407,15 +408,491 @@ static int nfp_ct_check_meta(struct nfp_fl_ct_flow_entry *post_ct_entry, return -EINVAL; } +static int +nfp_fl_calc_key_layers_sz(struct nfp_fl_key_ls in_key_ls, uint16_t *map) +{ + int key_size; + + /* This field must always be present */ + key_size = sizeof(struct nfp_flower_meta_tci); + map[FLOW_PAY_META_TCI] = 0; + + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_EXT_META) { + map[FLOW_PAY_EXT_META] = key_size; + key_size += sizeof(struct nfp_flower_ext_meta); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_PORT) { + map[FLOW_PAY_INPORT] = key_size; + key_size += sizeof(struct nfp_flower_in_port); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_MAC) { + map[FLOW_PAY_MAC_MPLS] = key_size; + key_size += sizeof(struct nfp_flower_mac_mpls); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_TP) { + map[FLOW_PAY_L4] = key_size; + key_size += sizeof(struct nfp_flower_tp_ports); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV4) { + map[FLOW_PAY_IPV4] = key_size; + key_size += sizeof(struct nfp_flower_ipv4); + } + if (in_key_ls.key_layer & NFP_FLOWER_LAYER_IPV6) { + map[FLOW_PAY_IPV6] = key_size; + key_size += sizeof(struct nfp_flower_ipv6); + } + + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GRE) { + map[FLOW_PAY_GRE] = key_size; + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) + key_size += sizeof(struct nfp_flower_ipv6_gre_tun); + else + key_size += sizeof(struct nfp_flower_ipv4_gre_tun); + } + + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_QINQ) { + map[FLOW_PAY_QINQ] = key_size; + key_size += sizeof(struct nfp_flower_vlan); + } + + if ((in_key_ls.key_layer & NFP_FLOWER_LAYER_VXLAN) || + (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE)) { + map[FLOW_PAY_UDP_TUN] = key_size; + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) + key_size += sizeof(struct nfp_flower_ipv6_udp_tun); + else + key_size += sizeof(struct nfp_flower_ipv4_udp_tun); + } + + if (in_key_ls.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { + map[FLOW_PAY_GENEVE_OPT] = key_size; + key_size += sizeof(struct nfp_flower_geneve_options); + } + + return key_size; +} + +static int nfp_fl_merge_actions_offload(struct flow_rule **rules, + struct nfp_flower_priv *priv, + struct net_device *netdev, + struct nfp_fl_payload *flow_pay) +{ + struct flow_action_entry *a_in; + int i, j, num_actions, id; + struct flow_rule *a_rule; + int err = 0, offset = 0; + + num_actions = rules[CT_TYPE_PRE_CT]->action.num_entries + + rules[CT_TYPE_NFT]->action.num_entries + + rules[CT_TYPE_POST_CT]->action.num_entries; + + a_rule = flow_rule_alloc(num_actions); + if (!a_rule) + return -ENOMEM; + + /* Actions need a BASIC dissector. */ + a_rule->match = rules[CT_TYPE_PRE_CT]->match; + + /* Copy actions */ + for (j = 0; j < _CT_TYPE_MAX; j++) { + if (flow_rule_match_key(rules[j], FLOW_DISSECTOR_KEY_BASIC)) { + struct flow_match_basic match; + + /* ip_proto is the only field that needed in later compile_action, + * needed to set the correct checksum flags. It doesn't really matter + * which input rule's ip_proto field we take as the earlier merge checks + * would have made sure that they don't conflict. 
We do not know which + * of the subflows would have the ip_proto filled in, so we need to iterate + * through the subflows and assign the proper subflow to a_rule + */ + flow_rule_match_basic(rules[j], &match); + if (match.mask->ip_proto) + a_rule->match = rules[j]->match; + } + + for (i = 0; i < rules[j]->action.num_entries; i++) { + a_in = &rules[j]->action.entries[i]; + id = a_in->id; + + /* Ignore CT related actions as these would already have + * been taken care of by previous checks, and we do not send + * any CT actions to the firmware. + */ + switch (id) { + case FLOW_ACTION_CT: + case FLOW_ACTION_GOTO: + case FLOW_ACTION_CT_METADATA: + continue; + default: + memcpy(&a_rule->action.entries[offset++], + a_in, sizeof(struct flow_action_entry)); + break; + } + } + } + + /* Some actions would have been ignored, so update the num_entries field */ + a_rule->action.num_entries = offset; + err = nfp_flower_compile_action(priv->app, a_rule, netdev, flow_pay, NULL); + kfree(a_rule); + + return err; +} + static int nfp_fl_ct_add_offload(struct nfp_fl_nft_tc_merge *m_entry) { - return 0; + enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; + struct nfp_fl_ct_zone_entry *zt = m_entry->zt; + struct nfp_fl_key_ls key_layer, tmp_layer; + struct nfp_flower_priv *priv = zt->priv; + u16 key_map[_FLOW_PAY_LAYERS_MAX]; + struct nfp_fl_payload *flow_pay; + + struct flow_rule *rules[_CT_TYPE_MAX]; + u8 *key, *msk, *kdata, *mdata; + struct nfp_port *port = NULL; + struct net_device *netdev; + bool qinq_sup; + u32 port_id; + u16 offset; + int i, err; + + netdev = m_entry->netdev; + qinq_sup = !!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_QINQ); + + rules[CT_TYPE_PRE_CT] = m_entry->tc_m_parent->pre_ct_parent->rule; + rules[CT_TYPE_NFT] = m_entry->nft_parent->rule; + rules[CT_TYPE_POST_CT] = m_entry->tc_m_parent->post_ct_parent->rule; + + memset(&key_layer, 0, sizeof(struct nfp_fl_key_ls)); + memset(&key_map, 0, sizeof(key_map)); + + /* Calculate the resultant key layer and size for offload */ + for (i = 0; i < _CT_TYPE_MAX; i++) { + err = nfp_flower_calculate_key_layers(priv->app, + m_entry->netdev, + &tmp_layer, rules[i], + &tun_type, NULL); + if (err) + return err; + + key_layer.key_layer |= tmp_layer.key_layer; + key_layer.key_layer_two |= tmp_layer.key_layer_two; + } + key_layer.key_size = nfp_fl_calc_key_layers_sz(key_layer, key_map); + + flow_pay = nfp_flower_allocate_new(&key_layer); + if (!flow_pay) + return -ENOMEM; + + memset(flow_pay->unmasked_data, 0, key_layer.key_size); + memset(flow_pay->mask_data, 0, key_layer.key_size); + + kdata = flow_pay->unmasked_data; + mdata = flow_pay->mask_data; + + offset = key_map[FLOW_PAY_META_TCI]; + key = kdata + offset; + msk = mdata + offset; + nfp_flower_compile_meta((struct nfp_flower_meta_tci *)key, + (struct nfp_flower_meta_tci *)msk, + key_layer.key_layer); + + if (NFP_FLOWER_LAYER_EXT_META & key_layer.key_layer) { + offset = key_map[FLOW_PAY_EXT_META]; + key = kdata + offset; + msk = mdata + offset; + nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)key, + key_layer.key_layer_two); + nfp_flower_compile_ext_meta((struct nfp_flower_ext_meta *)msk, + key_layer.key_layer_two); + } + + /* Using in_port from the -trk rule. 
The tc merge checks should already + * be checking that the ingress netdevs are the same + */ + port_id = nfp_flower_get_port_id_from_netdev(priv->app, netdev); + offset = key_map[FLOW_PAY_INPORT]; + key = kdata + offset; + msk = mdata + offset; + err = nfp_flower_compile_port((struct nfp_flower_in_port *)key, + port_id, false, tun_type, NULL); + if (err) + goto ct_offload_err; + err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, + port_id, true, tun_type, NULL); + if (err) + goto ct_offload_err; + + /* This following part works on the assumption that previous checks has + * already filtered out flows that has different values for the different + * layers. Here we iterate through all three rules and merge their respective + * masked value(cared bits), basic method is: + * final_key = (r1_key & r1_mask) | (r2_key & r2_mask) | (r3_key & r3_mask) + * final_mask = r1_mask | r2_mask | r3_mask + * If none of the rules contains a match that is also fine, that simply means + * that the layer is not present. + */ + if (!qinq_sup) { + for (i = 0; i < _CT_TYPE_MAX; i++) { + offset = key_map[FLOW_PAY_META_TCI]; + key = kdata + offset; + msk = mdata + offset; + nfp_flower_compile_tci((struct nfp_flower_meta_tci *)key, + (struct nfp_flower_meta_tci *)msk, + rules[i]); + } + } + + if (NFP_FLOWER_LAYER_MAC & key_layer.key_layer) { + offset = key_map[FLOW_PAY_MAC_MPLS]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)key, + (struct nfp_flower_mac_mpls *)msk, + rules[i]); + err = nfp_flower_compile_mpls((struct nfp_flower_mac_mpls *)key, + (struct nfp_flower_mac_mpls *)msk, + rules[i], NULL); + if (err) + goto ct_offload_err; + } + } + + if (NFP_FLOWER_LAYER_IPV4 & key_layer.key_layer) { + offset = key_map[FLOW_PAY_IPV4]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv4((struct nfp_flower_ipv4 *)key, + (struct nfp_flower_ipv4 *)msk, + rules[i]); + } + } + + if (NFP_FLOWER_LAYER_IPV6 & key_layer.key_layer) { + offset = key_map[FLOW_PAY_IPV6]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv6((struct nfp_flower_ipv6 *)key, + (struct nfp_flower_ipv6 *)msk, + rules[i]); + } + } + + if (NFP_FLOWER_LAYER_TP & key_layer.key_layer) { + offset = key_map[FLOW_PAY_L4]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_tport((struct nfp_flower_tp_ports *)key, + (struct nfp_flower_tp_ports *)msk, + rules[i]); + } + } + + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GRE) { + offset = key_map[FLOW_PAY_GRE]; + key = kdata + offset; + msk = mdata + offset; + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { + struct nfp_flower_ipv6_gre_tun *gre_match; + struct nfp_ipv6_addr_entry *entry; + struct in6_addr *dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv6_gre_tun((void *)key, + (void *)msk, rules[i]); + } + gre_match = (struct nfp_flower_ipv6_gre_tun *)key; + dst = &gre_match->ipv6.dst; + + entry = nfp_tunnel_add_ipv6_off(priv->app, dst); + if (!entry) { + err = -ENOMEM; + goto ct_offload_err; + } + + flow_pay->nfp_tun_ipv6 = entry; + } else { + __be32 dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv4_gre_tun((void *)key, + (void *)msk, rules[i]); + } + dst = ((struct nfp_flower_ipv4_gre_tun *)key)->ipv4.dst; + + /* Store the tunnel destination in the rule data. 
+ * This must be present and be an exact match. + */ + flow_pay->nfp_tun_ipv4_addr = dst; + nfp_tunnel_add_ipv4_off(priv->app, dst); + } + } + + if (NFP_FLOWER_LAYER2_QINQ & key_layer.key_layer_two) { + offset = key_map[FLOW_PAY_QINQ]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_vlan((struct nfp_flower_vlan *)key, + (struct nfp_flower_vlan *)msk, + rules[i]); + } + } + + if (key_layer.key_layer & NFP_FLOWER_LAYER_VXLAN || + key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE) { + offset = key_map[FLOW_PAY_UDP_TUN]; + key = kdata + offset; + msk = mdata + offset; + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_TUN_IPV6) { + struct nfp_flower_ipv6_udp_tun *udp_match; + struct nfp_ipv6_addr_entry *entry; + struct in6_addr *dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv6_udp_tun((void *)key, + (void *)msk, rules[i]); + } + udp_match = (struct nfp_flower_ipv6_udp_tun *)key; + dst = &udp_match->ipv6.dst; + + entry = nfp_tunnel_add_ipv6_off(priv->app, dst); + if (!entry) { + err = -ENOMEM; + goto ct_offload_err; + } + + flow_pay->nfp_tun_ipv6 = entry; + } else { + __be32 dst; + + for (i = 0; i < _CT_TYPE_MAX; i++) { + nfp_flower_compile_ipv4_udp_tun((void *)key, + (void *)msk, rules[i]); + } + dst = ((struct nfp_flower_ipv4_udp_tun *)key)->ipv4.dst; + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. + */ + flow_pay->nfp_tun_ipv4_addr = dst; + nfp_tunnel_add_ipv4_off(priv->app, dst); + } + + if (key_layer.key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { + offset = key_map[FLOW_PAY_GENEVE_OPT]; + key = kdata + offset; + msk = mdata + offset; + for (i = 0; i < _CT_TYPE_MAX; i++) + nfp_flower_compile_geneve_opt(key, msk, rules[i]); + } + } + + /* Merge actions into flow_pay */ + err = nfp_fl_merge_actions_offload(rules, priv, netdev, flow_pay); + if (err) + goto ct_offload_err; + + /* Use the pointer address as the cookie, but set the last bit to 1. + * This is to avoid the 'is_merge_flow' check from detecting this as + * an already merged flow. This works since address alignment means + * that the last bit for pointer addresses will be 0. 
+ */ + flow_pay->tc_flower_cookie = ((unsigned long)flow_pay) | 0x1; + err = nfp_compile_flow_metadata(priv->app, flow_pay->tc_flower_cookie, + flow_pay, netdev, NULL); + if (err) + goto ct_offload_err; + + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + + err = rhashtable_insert_fast(&priv->flow_table, &flow_pay->fl_node, + nfp_flower_table_params); + if (err) + goto ct_release_offload_meta_err; + + err = nfp_flower_xmit_flow(priv->app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_ADD); + if (err) + goto ct_remove_rhash_err; + + m_entry->tc_flower_cookie = flow_pay->tc_flower_cookie; + m_entry->flow_pay = flow_pay; + + if (port) + port->tc_offload_cnt++; + + return err; + +ct_remove_rhash_err: + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &flow_pay->fl_node, + nfp_flower_table_params)); +ct_release_offload_meta_err: + nfp_modify_flow_metadata(priv->app, flow_pay); +ct_offload_err: + if (flow_pay->nfp_tun_ipv4_addr) + nfp_tunnel_del_ipv4_off(priv->app, flow_pay->nfp_tun_ipv4_addr); + if (flow_pay->nfp_tun_ipv6) + nfp_tunnel_put_ipv6_off(priv->app, flow_pay->nfp_tun_ipv6); + kfree(flow_pay->action_data); + kfree(flow_pay->mask_data); + kfree(flow_pay->unmasked_data); + kfree(flow_pay); + return err; } static int nfp_fl_ct_del_offload(struct nfp_app *app, unsigned long cookie, struct net_device *netdev) { - return 0; + struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_payload *flow_pay; + struct nfp_port *port = NULL; + int err = 0; + + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + + flow_pay = nfp_flower_search_fl_table(app, cookie, netdev); + if (!flow_pay) + return -ENOENT; + + err = nfp_modify_flow_metadata(app, flow_pay); + if (err) + goto err_free_merge_flow; + + if (flow_pay->nfp_tun_ipv4_addr) + nfp_tunnel_del_ipv4_off(app, flow_pay->nfp_tun_ipv4_addr); + + if (flow_pay->nfp_tun_ipv6) + nfp_tunnel_put_ipv6_off(app, flow_pay->nfp_tun_ipv6); + + if (!flow_pay->in_hw) { + err = 0; + goto err_free_merge_flow; + } + + err = nfp_flower_xmit_flow(app, flow_pay, + NFP_FLOWER_CMSG_TYPE_FLOW_DEL); + +err_free_merge_flow: + nfp_flower_del_linked_merge_flows(app, flow_pay); + if (port) + port->tc_offload_cnt--; + kfree(flow_pay->action_data); + kfree(flow_pay->mask_data); + kfree(flow_pay->unmasked_data); + WARN_ON_ONCE(rhashtable_remove_fast(&priv->flow_table, + &flow_pay->fl_node, + nfp_flower_table_params)); + kfree_rcu(flow_pay, rcu); + return err; } static int nfp_ct_do_nft_merge(struct nfp_fl_ct_zone_entry *zt, @@ -1048,6 +1525,139 @@ int nfp_fl_ct_handle_post_ct(struct nfp_flower_priv *priv, return 0; } +static void +nfp_fl_ct_sub_stats(struct nfp_fl_nft_tc_merge *nft_merge, + enum ct_entry_type type, u64 *m_pkts, + u64 *m_bytes, u64 *m_used) +{ + struct nfp_flower_priv *priv = nft_merge->zt->priv; + struct nfp_fl_payload *nfp_flow; + u32 ctx_id; + + nfp_flow = nft_merge->flow_pay; + if (!nfp_flow) + return; + + ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); + *m_pkts += priv->stats[ctx_id].pkts; + *m_bytes += priv->stats[ctx_id].bytes; + *m_used = max_t(u64, *m_used, priv->stats[ctx_id].used); + + /* If request is for a sub_flow which is part of a tunnel merged + * flow then update stats from tunnel merged flows first. 
+ */ + if (!list_empty(&nfp_flow->linked_flows)) + nfp_flower_update_merge_stats(priv->app, nfp_flow); + + if (type != CT_TYPE_NFT) { + /* Update nft cached stats */ + flow_stats_update(&nft_merge->nft_parent->stats, + priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + 0, priv->stats[ctx_id].used, + FLOW_ACTION_HW_STATS_DELAYED); + } else { + /* Update pre_ct cached stats */ + flow_stats_update(&nft_merge->tc_m_parent->pre_ct_parent->stats, + priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + 0, priv->stats[ctx_id].used, + FLOW_ACTION_HW_STATS_DELAYED); + /* Update post_ct cached stats */ + flow_stats_update(&nft_merge->tc_m_parent->post_ct_parent->stats, + priv->stats[ctx_id].bytes, + priv->stats[ctx_id].pkts, + 0, priv->stats[ctx_id].used, + FLOW_ACTION_HW_STATS_DELAYED); + } + /* Reset stats from the nfp */ + priv->stats[ctx_id].pkts = 0; + priv->stats[ctx_id].bytes = 0; +} + +int nfp_fl_ct_stats(struct flow_cls_offload *flow, + struct nfp_fl_ct_map_entry *ct_map_ent) +{ + struct nfp_fl_ct_flow_entry *ct_entry = ct_map_ent->ct_entry; + struct nfp_fl_nft_tc_merge *nft_merge, *nft_m_tmp; + struct nfp_fl_ct_tc_merge *tc_merge, *tc_m_tmp; + + u64 pkts = 0, bytes = 0, used = 0; + u64 m_pkts, m_bytes, m_used; + + spin_lock_bh(&ct_entry->zt->priv->stats_lock); + + if (ct_entry->type == CT_TYPE_PRE_CT) { + /* Iterate tc_merge entries associated with this flow */ + list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children, + pre_ct_list) { + m_pkts = 0; + m_bytes = 0; + m_used = 0; + /* Iterate nft_merge entries associated with this tc_merge flow */ + list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children, + tc_merge_list) { + nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_PRE_CT, + &m_pkts, &m_bytes, &m_used); + } + pkts += m_pkts; + bytes += m_bytes; + used = max_t(u64, used, m_used); + /* Update post_ct partner */ + flow_stats_update(&tc_merge->post_ct_parent->stats, + m_bytes, m_pkts, 0, m_used, + FLOW_ACTION_HW_STATS_DELAYED); + } + } else if (ct_entry->type == CT_TYPE_POST_CT) { + /* Iterate tc_merge entries associated with this flow */ + list_for_each_entry_safe(tc_merge, tc_m_tmp, &ct_entry->children, + post_ct_list) { + m_pkts = 0; + m_bytes = 0; + m_used = 0; + /* Iterate nft_merge entries associated with this tc_merge flow */ + list_for_each_entry_safe(nft_merge, nft_m_tmp, &tc_merge->children, + tc_merge_list) { + nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_POST_CT, + &m_pkts, &m_bytes, &m_used); + } + pkts += m_pkts; + bytes += m_bytes; + used = max_t(u64, used, m_used); + /* Update pre_ct partner */ + flow_stats_update(&tc_merge->pre_ct_parent->stats, + m_bytes, m_pkts, 0, m_used, + FLOW_ACTION_HW_STATS_DELAYED); + } + } else { + /* Iterate nft_merge entries associated with this nft flow */ + list_for_each_entry_safe(nft_merge, nft_m_tmp, &ct_entry->children, + nft_flow_list) { + nfp_fl_ct_sub_stats(nft_merge, CT_TYPE_NFT, + &pkts, &bytes, &used); + } + } + + /* Add stats from this request to stats potentially cached by + * previous requests. + */ + flow_stats_update(&ct_entry->stats, bytes, pkts, 0, used, + FLOW_ACTION_HW_STATS_DELAYED); + /* Finally update the flow stats from the original stats request */ + flow_stats_update(&flow->stats, ct_entry->stats.bytes, + ct_entry->stats.pkts, 0, + ct_entry->stats.lastused, + FLOW_ACTION_HW_STATS_DELAYED); + /* Stats has been synced to original flow, can now clear + * the cache. 
+ */ + ct_entry->stats.pkts = 0; + ct_entry->stats.bytes = 0; + spin_unlock_bh(&ct_entry->zt->priv->stats_lock); + + return 0; +} + static int nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offload *flow) { @@ -1080,7 +1690,11 @@ nfp_fl_ct_offload_nft_flow(struct nfp_fl_ct_zone_entry *zt, struct flow_cls_offl nfp_ct_map_params); return nfp_fl_ct_del_flow(ct_map_ent); case FLOW_CLS_STATS: - return 0; + ct_map_ent = rhashtable_lookup_fast(&zt->priv->ct_map_table, &flow->cookie, + nfp_ct_map_params); + if (ct_map_ent) + return nfp_fl_ct_stats(flow, ct_map_ent); + break; default: break; } diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h index 170b6cdb8cd0..beb6cceff9d8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.h +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.h @@ -83,6 +83,24 @@ enum ct_entry_type { CT_TYPE_PRE_CT, CT_TYPE_NFT, CT_TYPE_POST_CT, + _CT_TYPE_MAX, +}; + +enum nfp_nfp_layer_name { + FLOW_PAY_META_TCI = 0, + FLOW_PAY_INPORT, + FLOW_PAY_EXT_META, + FLOW_PAY_MAC_MPLS, + FLOW_PAY_L4, + FLOW_PAY_IPV4, + FLOW_PAY_IPV6, + FLOW_PAY_CT, + FLOW_PAY_GRE, + FLOW_PAY_QINQ, + FLOW_PAY_UDP_TUN, + FLOW_PAY_GENEVE_OPT, + + _FLOW_PAY_LAYERS_MAX }; /** @@ -228,4 +246,12 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent); */ int nfp_fl_ct_handle_nft_flow(enum tc_setup_type type, void *type_data, void *cb_priv); + +/** + * nfp_fl_ct_stats() - Handle flower stats callbacks for ct flows + * @flow: TC flower classifier offload structure. + * @ct_map_ent: ct map entry for the flow that needs deleting + */ +int nfp_fl_ct_stats(struct flow_cls_offload *flow, + struct nfp_fl_ct_map_entry *ct_map_ent); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 0fbd682ccf72..917c450a7aad 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -413,20 +413,73 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2); +void +nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, u8 key_type); +void +nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext); +int +nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, + bool mask_version, enum nfp_flower_tun_type tun_type, + struct netlink_ext_ack *extack); +void +nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule); +int +nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule, + struct netlink_ext_ack *extack); +void +nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, + struct nfp_flower_tp_ports *msk, + struct flow_rule *rule); +void +nfp_flower_compile_vlan(struct nfp_flower_vlan *ext, + struct nfp_flower_vlan *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, + struct nfp_flower_ipv4 *msk, struct flow_rule *rule); +void +nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, + struct nfp_flower_ipv6 *msk, struct flow_rule *rule); +void +nfp_flower_compile_geneve_opt(u8 *ext, u8 
*msk, struct flow_rule *rule); +void +nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, + struct nfp_flower_ipv4_gre_tun *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, + struct nfp_flower_ipv4_udp_tun *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext, + struct nfp_flower_ipv6_udp_tun *msk, + struct flow_rule *rule); +void +nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, + struct nfp_flower_ipv6_gre_tun *msk, + struct flow_rule *rule); int nfp_flower_compile_flow_match(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack); int nfp_flower_compile_action(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, struct netlink_ext_ack *extack); -int nfp_compile_flow_metadata(struct nfp_app *app, - struct flow_cls_offload *flow, +int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, struct nfp_fl_payload *nfp_flow, struct net_device *netdev, struct netlink_ext_ack *extack); @@ -498,4 +551,22 @@ int nfp_flower_xmit_pre_tun_flow(struct nfp_app *app, struct nfp_fl_payload *flow); int nfp_flower_xmit_pre_tun_del_flow(struct nfp_app *app, struct nfp_fl_payload *flow); + +struct nfp_fl_payload * +nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer); +int nfp_flower_calculate_key_layers(struct nfp_app *app, + struct net_device *netdev, + struct nfp_fl_key_ls *ret_key_ls, + struct flow_rule *flow, + enum nfp_flower_tun_type *tun_type, + struct netlink_ext_ack *extack); +void +nfp_flower_del_linked_merge_flows(struct nfp_app *app, + struct nfp_fl_payload *sub_flow); +int +nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, + u8 mtype); +void +nfp_flower_update_merge_stats(struct nfp_app *app, + struct nfp_fl_payload *sub_flow); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index 255a4dff6288..9d86eea4dc16 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -7,51 +7,68 @@ #include "cmsg.h" #include "main.h" -static void -nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, - struct nfp_flower_meta_tci *msk, - struct flow_rule *rule, u8 key_type, bool qinq_sup) +void +nfp_flower_compile_meta(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, u8 key_type) { - u16 tmp_tci; - - memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); - memset(msk, 0, sizeof(struct nfp_flower_meta_tci)); - /* Populate the metadata frame. */ ext->nfp_flow_key_layer = key_type; ext->mask_id = ~0; msk->nfp_flow_key_layer = key_type; msk->mask_id = ~0; +} - if (!qinq_sup && flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { +void +nfp_flower_compile_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct flow_rule *rule) +{ + u16 msk_tci, key_tci; + + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { struct flow_match_vlan match; flow_rule_match_vlan(rule, &match); /* Populate the tci field. 
*/ - tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; - tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + key_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, match.key->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, match.key->vlan_id); - ext->tci = cpu_to_be16(tmp_tci); - tmp_tci = NFP_FLOWER_MASK_VLAN_PRESENT; - tmp_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, match.mask->vlan_priority) | FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, match.mask->vlan_id); - msk->tci = cpu_to_be16(tmp_tci); + + ext->tci |= cpu_to_be16((key_tci & msk_tci)); + msk->tci |= cpu_to_be16(msk_tci); } } static void +nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *ext, + struct nfp_flower_meta_tci *msk, + struct flow_rule *rule, u8 key_type, bool qinq_sup) +{ + memset(ext, 0, sizeof(struct nfp_flower_meta_tci)); + memset(msk, 0, sizeof(struct nfp_flower_meta_tci)); + + nfp_flower_compile_meta(ext, msk, key_type); + + if (!qinq_sup) + nfp_flower_compile_tci(ext, msk, rule); +} + +void nfp_flower_compile_ext_meta(struct nfp_flower_ext_meta *frame, u32 key_ext) { frame->nfp_flow_key_layer2 = cpu_to_be32(key_ext); } -static int +int nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, bool mask_version, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack) @@ -74,28 +91,37 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, return 0; } -static int +void nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, - struct nfp_flower_mac_mpls *msk, struct flow_rule *rule, - struct netlink_ext_ack *extack) + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); - memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) { struct flow_match_eth_addrs match; + int i; flow_rule_match_eth_addrs(rule, &match); /* Populate mac frame. 
*/ - ether_addr_copy(ext->mac_dst, &match.key->dst[0]); - ether_addr_copy(ext->mac_src, &match.key->src[0]); - ether_addr_copy(msk->mac_dst, &match.mask->dst[0]); - ether_addr_copy(msk->mac_src, &match.mask->src[0]); + for (i = 0; i < ETH_ALEN; i++) { + ext->mac_dst[i] |= match.key->dst[i] & + match.mask->dst[i]; + msk->mac_dst[i] |= match.mask->dst[i]; + ext->mac_src[i] |= match.key->src[i] & + match.mask->src[i]; + msk->mac_src[i] |= match.mask->src[i]; + } } +} +int +nfp_flower_compile_mpls(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule, + struct netlink_ext_ack *extack) +{ if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) { struct flow_match_mpls match; - u32 t_mpls; + u32 key_mpls, msk_mpls; flow_rule_match_mpls(rule, &match); @@ -106,22 +132,24 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, return -EOPNOTSUPP; } - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, - match.key->ls[0].mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, - match.key->ls[0].mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, - match.key->ls[0].mpls_bos) | - NFP_FLOWER_MASK_MPLS_Q; - ext->mpls_lse = cpu_to_be32(t_mpls); - t_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, - match.mask->ls[0].mpls_label) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, - match.mask->ls[0].mpls_tc) | - FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, - match.mask->ls[0].mpls_bos) | - NFP_FLOWER_MASK_MPLS_Q; - msk->mpls_lse = cpu_to_be32(t_mpls); + key_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, + match.key->ls[0].mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, + match.key->ls[0].mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, + match.key->ls[0].mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + + msk_mpls = FIELD_PREP(NFP_FLOWER_MASK_MPLS_LB, + match.mask->ls[0].mpls_label) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_TC, + match.mask->ls[0].mpls_tc) | + FIELD_PREP(NFP_FLOWER_MASK_MPLS_BOS, + match.mask->ls[0].mpls_bos) | + NFP_FLOWER_MASK_MPLS_Q; + + ext->mpls_lse |= cpu_to_be32((key_mpls & msk_mpls)); + msk->mpls_lse |= cpu_to_be32(msk_mpls); } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) { /* Check for mpls ether type and set NFP_FLOWER_MASK_MPLS_Q * bit, which indicates an mpls ether type but without any @@ -132,30 +160,41 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *ext, flow_rule_match_basic(rule, &match); if (match.key->n_proto == cpu_to_be16(ETH_P_MPLS_UC) || match.key->n_proto == cpu_to_be16(ETH_P_MPLS_MC)) { - ext->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); - msk->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + ext->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); + msk->mpls_lse |= cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q); } } return 0; } -static void +static int +nfp_flower_compile_mac_mpls(struct nfp_flower_mac_mpls *ext, + struct nfp_flower_mac_mpls *msk, + struct flow_rule *rule, + struct netlink_ext_ack *extack) +{ + memset(ext, 0, sizeof(struct nfp_flower_mac_mpls)); + memset(msk, 0, sizeof(struct nfp_flower_mac_mpls)); + + nfp_flower_compile_mac(ext, msk, rule); + + return nfp_flower_compile_mpls(ext, msk, rule, extack); +} + +void nfp_flower_compile_tport(struct nfp_flower_tp_ports *ext, struct nfp_flower_tp_ports *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_tp_ports)); - memset(msk, 0, sizeof(struct nfp_flower_tp_ports)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) { struct flow_match_ports match; flow_rule_match_ports(rule, &match); - ext->port_src = match.key->src; - ext->port_dst = match.key->dst; - msk->port_src = 
match.mask->src; - msk->port_dst = match.mask->dst; + ext->port_src |= match.key->src & match.mask->src; + ext->port_dst |= match.key->dst & match.mask->dst; + msk->port_src |= match.mask->src; + msk->port_dst |= match.mask->dst; } } @@ -167,18 +206,18 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, struct flow_match_basic match; flow_rule_match_basic(rule, &match); - ext->proto = match.key->ip_proto; - msk->proto = match.mask->ip_proto; + ext->proto |= match.key->ip_proto & match.mask->ip_proto; + msk->proto |= match.mask->ip_proto; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) { struct flow_match_ip match; flow_rule_match_ip(rule, &match); - ext->tos = match.key->tos; - ext->ttl = match.key->ttl; - msk->tos = match.mask->tos; - msk->ttl = match.mask->ttl; + ext->tos |= match.key->tos & match.mask->tos; + ext->ttl |= match.key->ttl & match.mask->ttl; + msk->tos |= match.mask->tos; + msk->ttl |= match.mask->ttl; } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_TCP)) { @@ -231,99 +270,108 @@ nfp_flower_compile_ip_ext(struct nfp_flower_ip_ext *ext, } static void -nfp_flower_fill_vlan(struct flow_dissector_key_vlan *key, - struct nfp_flower_vlan *frame, - bool outer_vlan) +nfp_flower_fill_vlan(struct flow_match_vlan *match, + struct nfp_flower_vlan *ext, + struct nfp_flower_vlan *msk, bool outer_vlan) { - u16 tci; - - tci = NFP_FLOWER_MASK_VLAN_PRESENT; - tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, - key->vlan_priority) | - FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, - key->vlan_id); + struct flow_dissector_key_vlan *mask = match->mask; + struct flow_dissector_key_vlan *key = match->key; + u16 msk_tci, key_tci; + + key_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + key_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + key->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + key->vlan_id); + msk_tci = NFP_FLOWER_MASK_VLAN_PRESENT; + msk_tci |= FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO, + mask->vlan_priority) | + FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID, + mask->vlan_id); if (outer_vlan) { - frame->outer_tci = cpu_to_be16(tci); - frame->outer_tpid = key->vlan_tpid; + ext->outer_tci |= cpu_to_be16((key_tci & msk_tci)); + ext->outer_tpid |= key->vlan_tpid & mask->vlan_tpid; + msk->outer_tci |= cpu_to_be16(msk_tci); + msk->outer_tpid |= mask->vlan_tpid; } else { - frame->inner_tci = cpu_to_be16(tci); - frame->inner_tpid = key->vlan_tpid; + ext->inner_tci |= cpu_to_be16((key_tci & msk_tci)); + ext->inner_tpid |= key->vlan_tpid & mask->vlan_tpid; + msk->inner_tci |= cpu_to_be16(msk_tci); + msk->inner_tpid |= mask->vlan_tpid; } } -static void +void nfp_flower_compile_vlan(struct nfp_flower_vlan *ext, struct nfp_flower_vlan *msk, struct flow_rule *rule) { struct flow_match_vlan match; - memset(ext, 0, sizeof(struct nfp_flower_vlan)); - memset(msk, 0, sizeof(struct nfp_flower_vlan)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) { flow_rule_match_vlan(rule, &match); - nfp_flower_fill_vlan(match.key, ext, true); - nfp_flower_fill_vlan(match.mask, msk, true); + nfp_flower_fill_vlan(&match, ext, msk, true); } if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) { flow_rule_match_cvlan(rule, &match); - nfp_flower_fill_vlan(match.key, ext, false); - nfp_flower_fill_vlan(match.mask, msk, false); + nfp_flower_fill_vlan(&match, ext, msk, false); } } -static void +void nfp_flower_compile_ipv4(struct nfp_flower_ipv4 *ext, struct nfp_flower_ipv4 *msk, struct flow_rule *rule) { - struct flow_match_ipv4_addrs match; - - memset(ext, 0, sizeof(struct nfp_flower_ipv4)); - memset(msk, 0, sizeof(struct 
nfp_flower_ipv4)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) { + struct flow_match_ipv4_addrs match; + flow_rule_match_ipv4_addrs(rule, &match); - ext->ipv4_src = match.key->src; - ext->ipv4_dst = match.key->dst; - msk->ipv4_src = match.mask->src; - msk->ipv4_dst = match.mask->dst; + ext->ipv4_src |= match.key->src & match.mask->src; + ext->ipv4_dst |= match.key->dst & match.mask->dst; + msk->ipv4_src |= match.mask->src; + msk->ipv4_dst |= match.mask->dst; } nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); } -static void +void nfp_flower_compile_ipv6(struct nfp_flower_ipv6 *ext, struct nfp_flower_ipv6 *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv6)); - memset(msk, 0, sizeof(struct nfp_flower_ipv6)); - if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; + int i; flow_rule_match_ipv6_addrs(rule, &match); - ext->ipv6_src = match.key->src; - ext->ipv6_dst = match.key->dst; - msk->ipv6_src = match.mask->src; - msk->ipv6_dst = match.mask->dst; + for (i = 0; i < sizeof(ext->ipv6_src); i++) { + ext->ipv6_src.s6_addr[i] |= match.key->src.s6_addr[i] & + match.mask->src.s6_addr[i]; + ext->ipv6_dst.s6_addr[i] |= match.key->dst.s6_addr[i] & + match.mask->dst.s6_addr[i]; + msk->ipv6_src.s6_addr[i] |= match.mask->src.s6_addr[i]; + msk->ipv6_dst.s6_addr[i] |= match.mask->dst.s6_addr[i]; + } } nfp_flower_compile_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); } -static int -nfp_flower_compile_geneve_opt(void *ext, void *msk, struct flow_rule *rule) +void +nfp_flower_compile_geneve_opt(u8 *ext, u8 *msk, struct flow_rule *rule) { struct flow_match_enc_opts match; + int i; - flow_rule_match_enc_opts(rule, &match); - memcpy(ext, match.key->data, match.key->len); - memcpy(msk, match.mask->data, match.mask->len); + if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_OPTS)) { + flow_rule_match_enc_opts(rule, &match); - return 0; + for (i = 0; i < match.mask->len; i++) { + ext[i] |= match.key->data[i] & match.mask->data[i]; + msk[i] |= match.mask->data[i]; + } + } } static void @@ -335,10 +383,10 @@ nfp_flower_compile_tun_ipv4_addrs(struct nfp_flower_tun_ipv4 *ext, struct flow_match_ipv4_addrs match; flow_rule_match_enc_ipv4_addrs(rule, &match); - ext->src = match.key->src; - ext->dst = match.key->dst; - msk->src = match.mask->src; - msk->dst = match.mask->dst; + ext->src |= match.key->src & match.mask->src; + ext->dst |= match.key->dst & match.mask->dst; + msk->src |= match.mask->src; + msk->dst |= match.mask->dst; } } @@ -349,12 +397,17 @@ nfp_flower_compile_tun_ipv6_addrs(struct nfp_flower_tun_ipv6 *ext, { if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) { struct flow_match_ipv6_addrs match; + int i; flow_rule_match_enc_ipv6_addrs(rule, &match); - ext->src = match.key->src; - ext->dst = match.key->dst; - msk->src = match.mask->src; - msk->dst = match.mask->dst; + for (i = 0; i < sizeof(ext->src); i++) { + ext->src.s6_addr[i] |= match.key->src.s6_addr[i] & + match.mask->src.s6_addr[i]; + ext->dst.s6_addr[i] |= match.key->dst.s6_addr[i] & + match.mask->dst.s6_addr[i]; + msk->src.s6_addr[i] |= match.mask->src.s6_addr[i]; + msk->dst.s6_addr[i] |= match.mask->dst.s6_addr[i]; + } } } @@ -367,10 +420,10 @@ nfp_flower_compile_tun_ip_ext(struct nfp_flower_tun_ip_ext *ext, struct flow_match_ip match; flow_rule_match_enc_ip(rule, &match); - ext->tos = match.key->tos; - ext->ttl = match.key->ttl; - msk->tos = match.mask->tos; - msk->ttl = match.mask->ttl; + ext->tos |= match.key->tos & 
match.mask->tos; + ext->ttl |= match.key->ttl & match.mask->ttl; + msk->tos |= match.mask->tos; + msk->ttl |= match.mask->ttl; } } @@ -383,10 +436,11 @@ nfp_flower_compile_tun_udp_key(__be32 *key, __be32 *key_msk, u32 vni; flow_rule_match_enc_keyid(rule, &match); - vni = be32_to_cpu(match.key->keyid) << NFP_FL_TUN_VNI_OFFSET; - *key = cpu_to_be32(vni); + vni = be32_to_cpu((match.key->keyid & match.mask->keyid)) << + NFP_FL_TUN_VNI_OFFSET; + *key |= cpu_to_be32(vni); vni = be32_to_cpu(match.mask->keyid) << NFP_FL_TUN_VNI_OFFSET; - *key_msk = cpu_to_be32(vni); + *key_msk |= cpu_to_be32(vni); } } @@ -398,22 +452,19 @@ nfp_flower_compile_tun_gre_key(__be32 *key, __be32 *key_msk, __be16 *flags, struct flow_match_enc_keyid match; flow_rule_match_enc_keyid(rule, &match); - *key = match.key->keyid; - *key_msk = match.mask->keyid; + *key |= match.key->keyid & match.mask->keyid; + *key_msk |= match.mask->keyid; *flags = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); *flags_msk = cpu_to_be16(NFP_FL_GRE_FLAG_KEY); } } -static void +void nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, struct nfp_flower_ipv4_gre_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv4_gre_tun)); - /* NVGRE is the only supported GRE tunnel type */ ext->ethertype = cpu_to_be16(ETH_P_TEB); msk->ethertype = cpu_to_be16(~0); @@ -424,40 +475,31 @@ nfp_flower_compile_ipv4_gre_tun(struct nfp_flower_ipv4_gre_tun *ext, &ext->tun_flags, &msk->tun_flags, rule); } -static void +void nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *ext, struct nfp_flower_ipv4_udp_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv4_udp_tun)); - nfp_flower_compile_tun_ipv4_addrs(&ext->ipv4, &msk->ipv4, rule); nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule); } -static void +void nfp_flower_compile_ipv6_udp_tun(struct nfp_flower_ipv6_udp_tun *ext, struct nfp_flower_ipv6_udp_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv6_udp_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv6_udp_tun)); - nfp_flower_compile_tun_ipv6_addrs(&ext->ipv6, &msk->ipv6, rule); nfp_flower_compile_tun_ip_ext(&ext->ip_ext, &msk->ip_ext, rule); nfp_flower_compile_tun_udp_key(&ext->tun_id, &msk->tun_id, rule); } -static void +void nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, struct nfp_flower_ipv6_gre_tun *msk, struct flow_rule *rule) { - memset(ext, 0, sizeof(struct nfp_flower_ipv6_gre_tun)); - memset(msk, 0, sizeof(struct nfp_flower_ipv6_gre_tun)); - /* NVGRE is the only supported GRE tunnel type */ ext->ethertype = cpu_to_be16(ETH_P_TEB); msk->ethertype = cpu_to_be16(~0); @@ -469,14 +511,13 @@ nfp_flower_compile_ipv6_gre_tun(struct nfp_flower_ipv6_gre_tun *ext, } int nfp_flower_compile_flow_match(struct nfp_app *app, - struct flow_cls_offload *flow, + struct flow_rule *rule, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type, struct netlink_ext_ack *extack) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct nfp_flower_priv *priv = app->priv; bool qinq_sup; u32 port_id; @@ -527,9 +568,9 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, msk += sizeof(struct nfp_flower_in_port); if (NFP_FLOWER_LAYER_MAC & key_ls->key_layer) { - err = 
nfp_flower_compile_mac((struct nfp_flower_mac_mpls *)ext, - (struct nfp_flower_mac_mpls *)msk, - rule, extack); + err = nfp_flower_compile_mac_mpls((struct nfp_flower_mac_mpls *)ext, + (struct nfp_flower_mac_mpls *)msk, + rule, extack); if (err) return err; @@ -640,9 +681,7 @@ int nfp_flower_compile_flow_match(struct nfp_app *app, } if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { - err = nfp_flower_compile_geneve_opt(ext, msk, rule); - if (err) - return err; + nfp_flower_compile_geneve_opt(ext, msk, rule); } } diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 621113650a9b..2af9faee96c5 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -290,8 +290,7 @@ nfp_check_mask_remove(struct nfp_app *app, char *mask_data, u32 mask_len, return true; } -int nfp_compile_flow_metadata(struct nfp_app *app, - struct flow_cls_offload *flow, +int nfp_compile_flow_metadata(struct nfp_app *app, u32 cookie, struct nfp_fl_payload *nfp_flow, struct net_device *netdev, struct netlink_ext_ack *extack) @@ -310,7 +309,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, } nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt); - nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie); + nfp_flow->meta.host_cookie = cpu_to_be64(cookie); nfp_flow->ingress_dev = netdev; ctx_entry = kzalloc(sizeof(*ctx_entry), GFP_KERNEL); @@ -357,7 +356,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, priv->stats[stats_cxt].bytes = 0; priv->stats[stats_cxt].used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev); + check_entry = nfp_flower_search_fl_table(app, cookie, netdev); if (check_entry) { NL_SET_ERR_MSG_MOD(extack, "invalid entry: cannot offload duplicate flow entry"); if (nfp_release_stats_entry(app, stats_cxt)) { diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 2406d33356ad..556c3495211d 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -41,6 +41,8 @@ BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \ BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \ BIT(FLOW_DISSECTOR_KEY_MPLS) | \ + BIT(FLOW_DISSECTOR_KEY_CT) | \ + BIT(FLOW_DISSECTOR_KEY_META) | \ BIT(FLOW_DISSECTOR_KEY_IP)) #define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \ @@ -89,7 +91,7 @@ struct nfp_flower_merge_check { }; }; -static int +int nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, u8 mtype) { @@ -134,20 +136,16 @@ nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, return 0; } -static bool nfp_flower_check_higher_than_mac(struct flow_cls_offload *f) +static bool nfp_flower_check_higher_than_mac(struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(f); - return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); } -static bool nfp_flower_check_higher_than_l3(struct flow_cls_offload *f) +static bool nfp_flower_check_higher_than_l3(struct flow_rule *rule) { - struct flow_rule *rule = flow_cls_offload_flow_rule(f); - return flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS) || flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP); } @@ -236,15 +234,14 @@ nfp_flower_calc_udp_tun_layer(struct 
flow_dissector_key_ports *enc_ports, return 0; } -static int +int nfp_flower_calculate_key_layers(struct nfp_app *app, struct net_device *netdev, struct nfp_fl_key_ls *ret_key_ls, - struct flow_cls_offload *flow, + struct flow_rule *rule, enum nfp_flower_tun_type *tun_type, struct netlink_ext_ack *extack) { - struct flow_rule *rule = flow_cls_offload_flow_rule(flow); struct flow_dissector *dissector = rule->match.dissector; struct flow_match_basic basic = { NULL, NULL}; struct nfp_flower_priv *priv = app->priv; @@ -452,7 +449,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, NL_SET_ERR_MSG_MOD(extack, "unsupported offload: match on given EtherType is not supported"); return -EOPNOTSUPP; } - } else if (nfp_flower_check_higher_than_mac(flow)) { + } else if (nfp_flower_check_higher_than_mac(rule)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match above L2 without specified EtherType"); return -EOPNOTSUPP; } @@ -471,7 +468,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } if (!(key_layer & NFP_FLOWER_LAYER_TP) && - nfp_flower_check_higher_than_l3(flow)) { + nfp_flower_check_higher_than_l3(rule)) { NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot match on L4 information without specified IP protocol type"); return -EOPNOTSUPP; } @@ -543,7 +540,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, return 0; } -static struct nfp_fl_payload * +struct nfp_fl_payload * nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) { struct nfp_fl_payload *flow_pay; @@ -1005,9 +1002,7 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, struct nfp_fl_payload *sub_flow1, struct nfp_fl_payload *sub_flow2) { - struct flow_cls_offload merge_tc_off; struct nfp_flower_priv *priv = app->priv; - struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *merge_flow; struct nfp_fl_key_ls merge_key_ls; struct nfp_merge_info *merge_info; @@ -1016,7 +1011,6 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, ASSERT_RTNL(); - extack = merge_tc_off.common.extack; if (sub_flow1 == sub_flow2 || nfp_flower_is_merge_flow(sub_flow1) || nfp_flower_is_merge_flow(sub_flow2)) @@ -1061,9 +1055,8 @@ int nfp_flower_merge_offloaded_flows(struct nfp_app *app, if (err) goto err_unlink_sub_flow1; - merge_tc_off.cookie = merge_flow->tc_flower_cookie; - err = nfp_compile_flow_metadata(app, &merge_tc_off, merge_flow, - merge_flow->ingress_dev, extack); + err = nfp_compile_flow_metadata(app, merge_flow->tc_flower_cookie, merge_flow, + merge_flow->ingress_dev, NULL); if (err) goto err_unlink_sub_flow2; @@ -1305,6 +1298,7 @@ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flow) { + struct flow_rule *rule = flow_cls_offload_flow_rule(flow); enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; struct nfp_flower_priv *priv = app->priv; struct netlink_ext_ack *extack = NULL; @@ -1330,7 +1324,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, + err = nfp_flower_calculate_key_layers(app, netdev, key_layer, rule, &tun_type, extack); if (err) goto err_free_key_ls; @@ -1341,12 +1335,12 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_free_key_ls; } - err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, + err = nfp_flower_compile_flow_match(app, rule, key_layer, netdev, flow_pay, tun_type, extack); if (err) goto err_destroy_flow; - err = 
nfp_flower_compile_action(app, flow, netdev, flow_pay, extack); + err = nfp_flower_compile_action(app, rule, netdev, flow_pay, extack); if (err) goto err_destroy_flow; @@ -1356,7 +1350,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, goto err_destroy_flow; } - err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev, extack); + err = nfp_compile_flow_metadata(app, flow->cookie, flow_pay, netdev, extack); if (err) goto err_destroy_flow; @@ -1476,7 +1470,7 @@ err_free_links: kfree_rcu(merge_flow, rcu); } -static void +void nfp_flower_del_linked_merge_flows(struct nfp_app *app, struct nfp_fl_payload *sub_flow) { @@ -1601,7 +1595,7 @@ __nfp_flower_update_merge_stats(struct nfp_app *app, } } -static void +void nfp_flower_update_merge_stats(struct nfp_app *app, struct nfp_fl_payload *sub_flow) { @@ -1628,10 +1622,17 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, struct flow_cls_offload *flow) { struct nfp_flower_priv *priv = app->priv; + struct nfp_fl_ct_map_entry *ct_map_ent; struct netlink_ext_ack *extack = NULL; struct nfp_fl_payload *nfp_flow; u32 ctx_id; + /* Check ct_map table first */ + ct_map_ent = rhashtable_lookup_fast(&priv->ct_map_table, &flow->cookie, + nfp_ct_map_params); + if (ct_map_ent) + return nfp_fl_ct_stats(flow, ct_map_ent); + extack = flow->common.extack; nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index 742a420152b3..bb3b8a7f6c5d 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -692,7 +692,7 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_pci_disable; } - devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf)); + devlink = devlink_alloc(&nfp_devlink_ops, sizeof(*pf), &pdev->dev); if (!devlink) { err = -ENOMEM; goto err_rel_regions; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index df5b748be068..df203738511b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -17,6 +17,7 @@ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/pci.h> +#include <linux/dim.h> #include <linux/io-64-nonatomic-hi-lo.h> #include <linux/semaphore.h> #include <linux/workqueue.h> @@ -360,6 +361,9 @@ struct nfp_net_rx_ring { * @rx_ring: Pointer to RX ring * @xdp_ring: Pointer to an extra TX ring for XDP * @irq_entry: MSI-X table entry (use for talking to the device) + * @event_ctr: Number of interrupt + * @rx_dim: Dynamic interrupt moderation structure for RX + * @tx_dim: Dynamic interrupt moderation structure for TX * @rx_sync: Seqlock for atomic updates of RX stats * @rx_pkts: Number of received packets * @rx_bytes: Number of received bytes @@ -410,6 +414,10 @@ struct nfp_net_r_vector { u16 irq_entry; + u16 event_ctr; + struct dim rx_dim; + struct dim tx_dim; + struct u64_stats_sync rx_sync; u64 rx_pkts; u64 rx_bytes; @@ -571,6 +579,8 @@ struct nfp_net_dp { * mailbox area, crypto TLV * @link_up: Is the link up? * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading + * @rx_coalesce_adapt_on: Is RX interrupt moderation adaptive? + * @tx_coalesce_adapt_on: Is TX interrupt moderation adaptive? 
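The adaptive-coalescing code added below converts each ethtool '_usecs' value into ME timestamp ticks and packs it together with the frame count into one 32-bit IRQ_MOD register value; a minimal sketch of that arithmetic, using an assumed me_freq_mhz of 800 purely as an example, could look like this:

/* Illustrative sketch only -- me_freq_mhz really comes from
 * nn->tlv_caps.me_freq_mhz; 800 is an assumed example value.
 */
static int example_pack_irq_mod(unsigned int me_freq_mhz,
				unsigned int usecs, unsigned int frames,
				u32 *value)
{
	/* 16 ME clock cycles per timestamp tick: 800 MHz -> factor 50 */
	unsigned int factor = me_freq_mhz / 16;

	/* both packed fields are 16 bits wide */
	if (nfp_net_coalesce_para_check(factor * usecs, frames))
		return -EINVAL;

	/* e.g. 50 usecs -> 2500 ticks, 64 frames -> 0x004009c4 */
	*value = (frames << 16) | (factor * usecs);
	return 0;
}

The same conversion and range check are applied whether the values come from ethtool set_coalesce or from the net_dim work handlers.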
* @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter * @rx_coalesce_max_frames: RX interrupt moderation frame count parameter * @tx_coalesce_usecs: TX interrupt moderation usecs delay parameter @@ -654,6 +664,8 @@ struct nfp_net { struct semaphore bar_lock; + bool rx_coalesce_adapt_on; + bool tx_coalesce_adapt_on; u32 rx_coalesce_usecs; u32 rx_coalesce_max_frames; u32 tx_coalesce_usecs; @@ -919,6 +931,14 @@ static inline bool nfp_netdev_is_nfp_net(struct net_device *netdev) return netdev->netdev_ops == &nfp_net_netdev_ops; } +static inline int nfp_net_coalesce_para_check(u32 usecs, u32 pkts) +{ + if ((usecs >= ((1 << 16) - 1)) || (pkts >= ((1 << 16) - 1))) + return -EINVAL; + + return 0; +} + /* Prototypes */ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, void __iomem *ctrl_bar); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 5dfa4799c34f..5bfa22accf2c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -474,6 +474,12 @@ static irqreturn_t nfp_net_irq_rxtx(int irq, void *data) { struct nfp_net_r_vector *r_vec = data; + /* Currently we cannot tell if it's a rx or tx interrupt, + * since dim does not need accurate event_ctr to calculate, + * we just use this counter for both rx and tx dim. + */ + r_vec->event_ctr++; + napi_schedule_irqoff(&r_vec->napi); /* The FW auto-masks any interrupt, either via the MASK bit in @@ -1697,7 +1703,7 @@ nfp_net_parse_meta(struct net_device *netdev, struct nfp_meta_parsed *meta, case NFP_NET_META_RESYNC_INFO: if (nfp_net_tls_rx_resync_req(netdev, data, pkt, pkt_len)) - return NULL; + return false; data += sizeof(struct nfp_net_tls_resync_req); break; default: @@ -2061,6 +2067,36 @@ static int nfp_net_poll(struct napi_struct *napi, int budget) if (napi_complete_done(napi, pkts_polled)) nfp_net_irq_unmask(r_vec->nfp_net, r_vec->irq_entry); + if (r_vec->nfp_net->rx_coalesce_adapt_on) { + struct dim_sample dim_sample = {}; + unsigned int start; + u64 pkts, bytes; + + do { + start = u64_stats_fetch_begin(&r_vec->rx_sync); + pkts = r_vec->rx_pkts; + bytes = r_vec->rx_bytes; + } while (u64_stats_fetch_retry(&r_vec->rx_sync, start)); + + dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); + net_dim(&r_vec->rx_dim, dim_sample); + } + + if (r_vec->nfp_net->tx_coalesce_adapt_on) { + struct dim_sample dim_sample = {}; + unsigned int start; + u64 pkts, bytes; + + do { + start = u64_stats_fetch_begin(&r_vec->tx_sync); + pkts = r_vec->tx_pkts; + bytes = r_vec->tx_bytes; + } while (u64_stats_fetch_retry(&r_vec->tx_sync, start)); + + dim_update_sample(r_vec->event_ctr, pkts, bytes, &dim_sample); + net_dim(&r_vec->tx_dim, dim_sample); + } + return pkts_polled; } @@ -2873,6 +2909,7 @@ static int nfp_net_set_config_and_enable(struct nfp_net *nn) */ static void nfp_net_close_stack(struct nfp_net *nn) { + struct nfp_net_r_vector *r_vec; unsigned int r; disable_irq(nn->irq_entries[NFP_NET_IRQ_LSC_IDX].vector); @@ -2880,8 +2917,16 @@ static void nfp_net_close_stack(struct nfp_net *nn) nn->link_up = false; for (r = 0; r < nn->dp.num_r_vecs; r++) { - disable_irq(nn->r_vecs[r].irq_vector); - napi_disable(&nn->r_vecs[r].napi); + r_vec = &nn->r_vecs[r]; + + disable_irq(r_vec->irq_vector); + napi_disable(&r_vec->napi); + + if (r_vec->rx_ring) + cancel_work_sync(&r_vec->rx_dim.work); + + if (r_vec->tx_ring) + cancel_work_sync(&r_vec->tx_dim.work); } netif_tx_disable(nn->dp.netdev); @@ -2948,17 
+2993,92 @@ void nfp_ctrl_close(struct nfp_net *nn) rtnl_unlock(); } +static void nfp_net_rx_dim_work(struct work_struct *work) +{ + struct nfp_net_r_vector *r_vec; + unsigned int factor, value; + struct dim_cq_moder moder; + struct nfp_net *nn; + struct dim *dim; + + dim = container_of(work, struct dim, work); + moder = net_dim_get_rx_moderation(dim->mode, dim->profile_ix); + r_vec = container_of(dim, struct nfp_net_r_vector, rx_dim); + nn = r_vec->nfp_net; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->tlv_caps.me_freq_mhz / 16; + if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts)) + return; + + /* copy RX interrupt coalesce parameters */ + value = (moder.pkts << 16) | (factor * moder.usec); + rtnl_lock(); + nn_writel(nn, NFP_NET_CFG_RXR_IRQ_MOD(r_vec->rx_ring->idx), value); + (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); + rtnl_unlock(); + + dim->state = DIM_START_MEASURE; +} + +static void nfp_net_tx_dim_work(struct work_struct *work) +{ + struct nfp_net_r_vector *r_vec; + unsigned int factor, value; + struct dim_cq_moder moder; + struct nfp_net *nn; + struct dim *dim; + + dim = container_of(work, struct dim, work); + moder = net_dim_get_tx_moderation(dim->mode, dim->profile_ix); + r_vec = container_of(dim, struct nfp_net_r_vector, tx_dim); + nn = r_vec->nfp_net; + + /* Compute factor used to convert coalesce '_usecs' parameters to + * ME timestamp ticks. There are 16 ME clock cycles for each timestamp + * count. + */ + factor = nn->tlv_caps.me_freq_mhz / 16; + if (nfp_net_coalesce_para_check(factor * moder.usec, moder.pkts)) + return; + + /* copy TX interrupt coalesce parameters */ + value = (moder.pkts << 16) | (factor * moder.usec); + rtnl_lock(); + nn_writel(nn, NFP_NET_CFG_TXR_IRQ_MOD(r_vec->tx_ring->idx), value); + (void)nfp_net_reconfig(nn, NFP_NET_CFG_UPDATE_IRQMOD); + rtnl_unlock(); + + dim->state = DIM_START_MEASURE; +} + /** * nfp_net_open_stack() - Start the device from stack's perspective * @nn: NFP Net device to reconfigure */ static void nfp_net_open_stack(struct nfp_net *nn) { + struct nfp_net_r_vector *r_vec; unsigned int r; for (r = 0; r < nn->dp.num_r_vecs; r++) { - napi_enable(&nn->r_vecs[r].napi); - enable_irq(nn->r_vecs[r].irq_vector); + r_vec = &nn->r_vecs[r]; + + if (r_vec->rx_ring) { + INIT_WORK(&r_vec->rx_dim.work, nfp_net_rx_dim_work); + r_vec->rx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + + if (r_vec->tx_ring) { + INIT_WORK(&r_vec->tx_dim.work, nfp_net_tx_dim_work); + r_vec->tx_dim.mode = DIM_CQ_PERIOD_MODE_START_FROM_EQE; + } + + napi_enable(&r_vec->napi); + enable_irq(r_vec->irq_vector); } netif_tx_wake_all_queues(nn->dp.netdev); @@ -3161,17 +3281,12 @@ static int nfp_net_dp_swap_enable(struct nfp_net *nn, struct nfp_net_dp *dp) for (r = 0; r < nn->max_r_vecs; r++) nfp_net_vector_assign_rings(&nn->dp, &nn->r_vecs[r], r); - err = netif_set_real_num_rx_queues(nn->dp.netdev, nn->dp.num_rx_rings); + err = netif_set_real_num_queues(nn->dp.netdev, + nn->dp.num_stack_tx_rings, + nn->dp.num_rx_rings); if (err) return err; - if (nn->dp.netdev->real_num_tx_queues != nn->dp.num_stack_tx_rings) { - err = netif_set_real_num_tx_queues(nn->dp.netdev, - nn->dp.num_stack_tx_rings); - if (err) - return err; - } - return nfp_net_set_config_and_enable(nn); } @@ -3893,6 +4008,9 @@ static void nfp_net_irqmod_init(struct nfp_net *nn) nn->rx_coalesce_max_frames = 64; nn->tx_coalesce_usecs = 50; nn->tx_coalesce_max_frames = 
64; + + nn->rx_coalesce_adapt_on = true; + nn->tx_coalesce_adapt_on = true; } static void nfp_net_netdev_init(struct nfp_net *nn) diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 8803faadd302..0685ece1f155 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1078,13 +1078,18 @@ static void nfp_net_get_regs(struct net_device *netdev, } static int nfp_net_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nfp_net *nn = netdev_priv(netdev); if (!(nn->cap & NFP_NET_CFG_CTRL_IRQMOD)) return -EINVAL; + ec->use_adaptive_rx_coalesce = nn->rx_coalesce_adapt_on; + ec->use_adaptive_tx_coalesce = nn->tx_coalesce_adapt_on; + ec->rx_coalesce_usecs = nn->rx_coalesce_usecs; ec->rx_max_coalesced_frames = nn->rx_coalesce_max_frames; ec->tx_coalesce_usecs = nn->tx_coalesce_usecs; @@ -1327,7 +1332,9 @@ exit_close_nsp: } static int nfp_net_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nfp_net *nn = netdev_priv(netdev); unsigned int factor; @@ -1361,19 +1368,18 @@ static int nfp_net_set_coalesce(struct net_device *netdev, if (!ec->tx_coalesce_usecs && !ec->tx_max_coalesced_frames) return -EINVAL; - if (ec->rx_coalesce_usecs * factor >= ((1 << 16) - 1)) - return -EINVAL; - - if (ec->tx_coalesce_usecs * factor >= ((1 << 16) - 1)) + if (nfp_net_coalesce_para_check(ec->rx_coalesce_usecs * factor, + ec->rx_max_coalesced_frames)) return -EINVAL; - if (ec->rx_max_coalesced_frames >= ((1 << 16) - 1)) - return -EINVAL; - - if (ec->tx_max_coalesced_frames >= ((1 << 16) - 1)) + if (nfp_net_coalesce_para_check(ec->tx_coalesce_usecs * factor, + ec->tx_max_coalesced_frames)) return -EINVAL; /* configuration is valid */ + nn->rx_coalesce_adapt_on = !!ec->use_adaptive_rx_coalesce; + nn->tx_coalesce_adapt_on = !!ec->use_adaptive_tx_coalesce; + nn->rx_coalesce_usecs = ec->rx_coalesce_usecs; nn->rx_coalesce_max_frames = ec->rx_max_coalesced_frames; nn->tx_coalesce_usecs = ec->tx_coalesce_usecs; @@ -1445,7 +1451,8 @@ static int nfp_net_set_channels(struct net_device *netdev, static const struct ethtool_ops nfp_net_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_USECS | - ETHTOOL_COALESCE_MAX_FRAMES, + ETHTOOL_COALESCE_MAX_FRAMES | + ETHTOOL_COALESCE_USE_ADAPTIVE, .get_drvinfo = nfp_net_get_drvinfo, .get_link = ethtool_op_get_link, .get_ringparam = nfp_net_get_ringparam, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 921db40047d7..d10a93801344 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -701,7 +701,7 @@ int nfp_net_pci_probe(struct nfp_pf *pf) if (err) goto err_unmap; - err = devlink_register(devlink, &pf->pdev->dev); + err = devlink_register(devlink); if (err) goto err_app_clean; diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 2d097dcb7bda..346145d3180e 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -993,8 +993,11 @@ static void nixge_ethtools_get_drvinfo(struct net_device *ndev, strlcpy(ed->bus_info, "platform", sizeof(ed->bus_info)); } -static int 
nixge_ethtools_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +nixge_ethtools_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nixge_priv *priv = netdev_priv(ndev); u32 regval = 0; @@ -1008,8 +1011,11 @@ static int nixge_ethtools_get_coalesce(struct net_device *ndev, return 0; } -static int nixge_ethtools_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +nixge_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct nixge_priv *priv = netdev_priv(ndev); @@ -1223,7 +1229,6 @@ static int nixge_of_get_resources(struct platform_device *pdev) { const struct of_device_id *of_id; enum nixge_version version; - struct resource *ctrlres; struct net_device *ndev; struct nixge_priv *priv; @@ -1242,13 +1247,10 @@ static int nixge_of_get_resources(struct platform_device *pdev) netdev_err(ndev, "failed to map dma regs\n"); return PTR_ERR(priv->dma_regs); } - if (version <= NIXGE_V2) { + if (version <= NIXGE_V2) priv->ctrl_regs = priv->dma_regs + NIXGE_REG_CTRL_OFFSET; - } else { - ctrlres = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "ctrl"); - priv->ctrl_regs = devm_ioremap_resource(&pdev->dev, ctrlres); - } + else + priv->ctrl_regs = devm_platform_ioremap_resource_byname(pdev, "ctrl"); if (IS_ERR(priv->ctrl_regs)) { netdev_err(ndev, "failed to map ctrl regs\n"); return PTR_ERR(priv->ctrl_regs); diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c index 8724d6a9ed02..ef3fb4cc90af 100644 --- a/drivers/net/ethernet/nvidia/forcedeth.c +++ b/drivers/net/ethernet/nvidia/forcedeth.c @@ -5782,15 +5782,11 @@ static int nv_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) np->desc_ver = DESC_VER_3; np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; if (dma_64bit) { - if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) + if (dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(39))) dev_info(&pci_dev->dev, "64-bit DMA failed, using 32-bit addressing\n"); else dev->features |= NETIF_F_HIGHDMA; - if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { - dev_info(&pci_dev->dev, - "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); - } } } else if (id->driver_data & DEV_HAS_LARGEDESC) { /* packet format 2: supports jumbo frames */ diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 64c6842bd452..d29fe562b3de 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -1219,7 +1219,7 @@ static const struct net_device_ops lpc_netdev_ops = { .ndo_stop = lpc_eth_close, .ndo_start_xmit = lpc_eth_hard_start_xmit, .ndo_set_rx_mode = lpc_eth_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_mac_address = lpc_set_mac_address, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig index af84f72bf08e..4e18b64dceb9 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig +++ b/drivers/net/ethernet/oki-semi/pch_gbe/Kconfig @@ -6,6 +6,7 @@ config PCH_GBE tristate "OKI SEMICONDUCTOR IOH(ML7223/ML7831) GbE" depends on PCI && (X86_32 || COMPILE_TEST) + depends on PTP_1588_CLOCK select MII select PTP_1588_CLOCK_PCH select 
NET_PTP_CLASSIFY diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c index e351f3d1608f..ec3e558f890e 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c @@ -1031,13 +1031,7 @@ static void pch_gbe_watchdog(struct timer_list *t) struct ethtool_cmd cmd = { .cmd = ETHTOOL_GSET }; netdev->tx_queue_len = adapter->tx_queue_len; /* mii library handles link maintenance tasks */ - if (mii_ethtool_gset(&adapter->mii, &cmd)) { - netdev_err(netdev, "ethtool get setting Error\n"); - mod_timer(&adapter->watchdog_timer, - round_jiffies(jiffies + - PCH_GBE_WATCHDOG_PERIOD)); - return; - } + mii_ethtool_gset(&adapter->mii, &cmd); hw->mac.link_speed = ethtool_cmd_speed(&cmd); hw->mac.link_duplex = cmd.duplex; /* Set the RGMII control. */ @@ -2333,7 +2327,7 @@ static const struct net_device_ops pch_gbe_netdev_ops = { .ndo_tx_timeout = pch_gbe_tx_timeout, .ndo_change_mtu = pch_gbe_change_mtu, .ndo_set_features = pch_gbe_set_features, - .ndo_do_ioctl = pch_gbe_ioctl, + .ndo_eth_ioctl = pch_gbe_ioctl, .ndo_set_rx_mode = pch_gbe_set_multi, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = pch_gbe_netpoll, diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c index ed832046216a..3426f6fa2b57 100644 --- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c +++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_phy.c @@ -301,9 +301,7 @@ void pch_gbe_phy_init_setting(struct pch_gbe_hw *hw) int ret; u16 mii_reg; - ret = mii_ethtool_gset(&adapter->mii, &cmd); - if (ret) - netdev_err(adapter->netdev, "Error: mii_ethtool_gset\n"); + mii_ethtool_gset(&adapter->mii, &cmd); ethtool_cmd_speed_set(&cmd, hw->mac.link_speed); cmd.duplex = hw->mac.link_duplex; diff --git a/drivers/net/ethernet/packetengines/hamachi.c b/drivers/net/ethernet/packetengines/hamachi.c index d058a63602a9..1a6336a56d3d 100644 --- a/drivers/net/ethernet/packetengines/hamachi.c +++ b/drivers/net/ethernet/packetengines/hamachi.c @@ -546,7 +546,9 @@ static int read_eeprom(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int hamachi_open(struct net_device *dev); -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); +static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd); static void hamachi_timer(struct timer_list *t); static void hamachi_tx_timeout(struct net_device *dev, unsigned int txqueue); static void hamachi_init_ring(struct net_device *dev); @@ -571,7 +573,8 @@ static const struct net_device_ops hamachi_netdev_ops = { .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_tx_timeout = hamachi_tx_timeout, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = hamachi_ioctl, + .ndo_siocdevprivate = hamachi_siocdevprivate, }; @@ -1867,7 +1870,36 @@ static const struct ethtool_ops ethtool_ops_no_mii = { .get_drvinfo = hamachi_get_drvinfo, }; -static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) +/* private ioctl: set rx,tx intr params */ +static int hamachi_siocdevprivate(struct net_device *dev, struct ifreq *rq, + void __user *data, int cmd) +{ + struct hamachi_private *np = netdev_priv(dev); + u32 *d 
= (u32 *)&rq->ifr_ifru; + + if (!netif_running(dev)) + return -EINVAL; + + if (cmd != SIOCDEVPRIVATE + 3) + return -EOPNOTSUPP; + + /* Should add this check here or an ordinary user can do nasty + * things. -KDU + * + * TODO: Shut down the Rx and Tx engines while doing this. + */ + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + writel(d[0], np->base + TxIntrCtrl); + writel(d[1], np->base + RxIntrCtrl); + printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name, + (u32)readl(np->base + TxIntrCtrl), + (u32)readl(np->base + RxIntrCtrl)); + + return 0; +} + +static int hamachi_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct hamachi_private *np = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(rq); @@ -1876,28 +1908,9 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) if (!netif_running(dev)) return -EINVAL; - if (cmd == (SIOCDEVPRIVATE+3)) { /* set rx,tx intr params */ - u32 *d = (u32 *)&rq->ifr_ifru; - /* Should add this check here or an ordinary user can do nasty - * things. -KDU - * - * TODO: Shut down the Rx and Tx engines while doing this. - */ - if (!capable(CAP_NET_ADMIN)) - return -EPERM; - writel(d[0], np->base + TxIntrCtrl); - writel(d[1], np->base + RxIntrCtrl); - printk(KERN_NOTICE "%s: tx %08x, rx %08x intr\n", dev->name, - (u32) readl(np->base + TxIntrCtrl), - (u32) readl(np->base + RxIntrCtrl)); - rc = 0; - } - - else { - spin_lock_irq(&np->lock); - rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); - spin_unlock_irq(&np->lock); - } + spin_lock_irq(&np->lock); + rc = generic_mii_ioctl(&np->mii_if, data, cmd, NULL); + spin_unlock_irq(&np->lock); return rc; } diff --git a/drivers/net/ethernet/packetengines/yellowfin.c b/drivers/net/ethernet/packetengines/yellowfin.c index d1dd9bc1bc7f..f5cd8f51be7c 100644 --- a/drivers/net/ethernet/packetengines/yellowfin.c +++ b/drivers/net/ethernet/packetengines/yellowfin.c @@ -362,7 +362,7 @@ static const struct net_device_ops netdev_ops = { .ndo_set_rx_mode = set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = yellowfin_tx_timeout, }; diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c index 040a15a828b4..7e096b2888b9 100644 --- a/drivers/net/ethernet/pasemi/pasemi_mac.c +++ b/drivers/net/ethernet/pasemi/pasemi_mac.c @@ -247,12 +247,13 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, int f; struct pci_dev *pdev = mac->dma_pdev; - pci_unmap_single(pdev, dmas[0], skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, dmas[0], skb_headlen(skb), DMA_TO_DEVICE); for (f = 0; f < nfrags; f++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[f]; - pci_unmap_page(pdev, dmas[f+1], skb_frag_size(frag), PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, dmas[f + 1], skb_frag_size(frag), + DMA_TO_DEVICE); } dev_kfree_skb_irq(skb); @@ -548,10 +549,8 @@ static void pasemi_mac_free_rx_buffers(struct pasemi_mac *mac) for (i = 0; i < RX_RING_SIZE; i++) { info = &RX_DESC_INFO(rx, i); if (info->skb && info->dma) { - pci_unmap_single(mac->dma_pdev, - info->dma, - info->skb->len, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&mac->dma_pdev->dev, info->dma, + info->skb->len, DMA_FROM_DEVICE); dev_kfree_skb_any(info->skb); } info->dma = 0; @@ -600,11 +599,11 @@ static void pasemi_mac_replenish_rx_ring(struct net_device *dev, if (unlikely(!skb)) break; - dma = pci_map_single(mac->dma_pdev, skb->data, + dma = 
dma_map_single(&mac->dma_pdev->dev, skb->data, mac->bufsz - LOCAL_SKB_ALIGN, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); - if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) { + if (dma_mapping_error(&mac->dma_pdev->dev, dma)) { dev_kfree_skb_irq(info->skb); break; } @@ -741,8 +740,9 @@ static int pasemi_mac_clean_rx(struct pasemi_mac_rxring *rx, len = (macrx & XCT_MACRX_LLEN_M) >> XCT_MACRX_LLEN_S; - pci_unmap_single(pdev, dma, mac->bufsz - LOCAL_SKB_ALIGN, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&pdev->dev, dma, + mac->bufsz - LOCAL_SKB_ALIGN, + DMA_FROM_DEVICE); if (macrx & XCT_MACRX_CRC) { /* CRC error flagged */ @@ -1444,10 +1444,10 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) nfrags = skb_shinfo(skb)->nr_frags; - map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); + map[0] = dma_map_single(&mac->dma_pdev->dev, skb->data, + skb_headlen(skb), DMA_TO_DEVICE); map_size[0] = skb_headlen(skb); - if (pci_dma_mapping_error(mac->dma_pdev, map[0])) + if (dma_mapping_error(&mac->dma_pdev->dev, map[0])) goto out_err_nolock; for (i = 0; i < nfrags; i++) { @@ -1534,8 +1534,8 @@ out_err: spin_unlock_irqrestore(&txring->lock, flags); out_err_nolock: while (nfrags--) - pci_unmap_single(mac->dma_pdev, map[nfrags], map_size[nfrags], - PCI_DMA_TODEVICE); + dma_unmap_single(&mac->dma_pdev->dev, map[nfrags], + map_size[nfrags], DMA_TO_DEVICE); return NETDEV_TX_BUSY; } diff --git a/drivers/net/ethernet/pensando/Kconfig b/drivers/net/ethernet/pensando/Kconfig index 202973a82712..3f7519e435b8 100644 --- a/drivers/net/ethernet/pensando/Kconfig +++ b/drivers/net/ethernet/pensando/Kconfig @@ -20,7 +20,7 @@ if NET_VENDOR_PENSANDO config IONIC tristate "Pensando Ethernet IONIC Support" depends on 64BIT && PCI - depends on PTP_1588_CLOCK || !PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL select NET_DEVLINK select DIMLIB help diff --git a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c index e4a5416adc80..7e296fa71b36 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_bus_pci.c @@ -165,10 +165,10 @@ static int ionic_vf_alloc(struct ionic *ionic, int num_vfs) goto out; } + ionic->num_vfs++; /* ignore failures from older FW, we just won't get stats */ (void)ionic_set_vf_config(ionic, i, IONIC_VF_ATTR_STATSADDR, (u8 *)&v->stats_pa); - ionic->num_vfs++; } out: @@ -373,9 +373,6 @@ static void ionic_remove(struct pci_dev *pdev) { struct ionic *ionic = pci_get_drvdata(pdev); - if (!ionic) - return; - del_timer_sync(&ionic->watchdog_timer); if (ionic->lif) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c index 1dfe962e22e0..0d6858ab511c 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c @@ -15,6 +15,7 @@ static void ionic_watchdog_cb(struct timer_list *t) { struct ionic *ionic = from_timer(ionic, t, watchdog_timer); struct ionic_lif *lif = ionic->lif; + struct ionic_deferred_work *work; int hb; mod_timer(&ionic->watchdog_timer, @@ -31,6 +32,18 @@ static void ionic_watchdog_cb(struct timer_list *t) if (hb >= 0 && !test_bit(IONIC_LIF_F_FW_RESET, lif->state)) ionic_link_status_check_request(lif, CAN_NOT_SLEEP); + + if (test_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state)) { + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + 
+ work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); + } } void ionic_init_devinfo(struct ionic *ionic) @@ -106,6 +119,8 @@ int ionic_dev_setup(struct ionic *ionic) idev->last_fw_hb = 0; idev->fw_hb_ready = true; idev->fw_status_ready = true; + idev->fw_generation = IONIC_FW_STS_F_GENERATION & + ioread8(&idev->dev_info_regs->fw_status); mod_timer(&ionic->watchdog_timer, round_jiffies(jiffies + ionic->watchdog_period)); @@ -121,7 +136,9 @@ int ionic_heartbeat_check(struct ionic *ionic) { struct ionic_dev *idev = &ionic->idev; unsigned long check_time, last_check_time; - bool fw_status_ready, fw_hb_ready; + bool fw_status_ready = true; + bool fw_hb_ready; + u8 fw_generation; u8 fw_status; u32 fw_hb; @@ -140,9 +157,29 @@ do_check_time: /* firmware is useful only if the running bit is set and * fw_status != 0xff (bad PCI read) + * If fw_status is not ready don't bother with the generation. */ fw_status = ioread8(&idev->dev_info_regs->fw_status); - fw_status_ready = (fw_status != 0xff) && (fw_status & IONIC_FW_STS_F_RUNNING); + + if (fw_status == 0xff || !(fw_status & IONIC_FW_STS_F_RUNNING)) { + fw_status_ready = false; + } else { + fw_generation = fw_status & IONIC_FW_STS_F_GENERATION; + if (idev->fw_generation != fw_generation) { + dev_info(ionic->dev, "FW generation 0x%02x -> 0x%02x\n", + idev->fw_generation, fw_generation); + + idev->fw_generation = fw_generation; + + /* If the generation changed, the fw status is not + * ready so we need to trigger a fw-down cycle. After + * the down, the next watchdog will see the fw is up + * and the generation value stable, so will trigger + * the fw-up activity. + */ + fw_status_ready = false; + } + } /* is this a transition? 
*/ if (fw_status_ready != idev->fw_status_ready) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.h b/drivers/net/ethernet/pensando/ionic/ionic_dev.h index c25cf9b744c5..8311086fb1f4 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_dev.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.h @@ -143,6 +143,7 @@ struct ionic_dev { u32 last_fw_hb; bool fw_hb_ready; bool fw_status_ready; + u8 fw_generation; u64 __iomem *db_pages; dma_addr_t phy_db_pages; @@ -160,8 +161,6 @@ struct ionic_dev { struct ionic_cq_info { union { void *cq_desc; - struct ionic_txq_comp *txcq; - struct ionic_rxq_comp *rxcq; struct ionic_admin_comp *admincq; struct ionic_notifyq_event *notifyq; }; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c index b41301a5b0df..c7d0e195d176 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_devlink.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_devlink.c @@ -64,7 +64,7 @@ struct ionic *ionic_devlink_alloc(struct device *dev) { struct devlink *dl; - dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic)); + dl = devlink_alloc(&ionic_dl_ops, sizeof(struct ionic), dev); return devlink_priv(dl); } @@ -82,7 +82,7 @@ int ionic_devlink_register(struct ionic *ionic) struct devlink_port_attrs attrs = {}; int err; - err = devlink_register(dl, ionic->dev); + err = devlink_register(dl); if (err) { dev_warn(ionic->dev, "devlink_register failed: %d\n", err); return err; @@ -91,20 +91,20 @@ int ionic_devlink_register(struct ionic *ionic) attrs.flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL; devlink_port_attrs_set(&ionic->dl_port, &attrs); err = devlink_port_register(dl, &ionic->dl_port, 0); - if (err) + if (err) { dev_err(ionic->dev, "devlink_port_register failed: %d\n", err); - else - devlink_port_type_eth_set(&ionic->dl_port, - ionic->lif->netdev); + devlink_unregister(dl); + return err; + } - return err; + devlink_port_type_eth_set(&ionic->dl_port, ionic->lif->netdev); + return 0; } void ionic_devlink_unregister(struct ionic *ionic) { struct devlink *dl = priv_to_devlink(ionic); - if (ionic->dl_port.registered) - devlink_port_unregister(&ionic->dl_port); + devlink_port_unregister(&ionic->dl_port); devlink_unregister(dl); } diff --git a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c index 6583be570e45..e91b4874a57f 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_ethtool.c @@ -32,6 +32,9 @@ static void ionic_get_stats(struct net_device *netdev, struct ionic_lif *lif = netdev_priv(netdev); u32 i; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return; + memset(buf, 0, stats->n_stats * sizeof(*buf)); for (i = 0; i < ionic_num_stats_grps; i++) ionic_stats_groups[i].get_values(lif, &buf); @@ -274,6 +277,9 @@ static int ionic_set_link_ksettings(struct net_device *netdev, struct ionic *ionic = lif->ionic; int err = 0; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + /* set autoneg */ if (ks->base.autoneg != idev->port_info->config.an_enable) { mutex_lock(&ionic->dev_cmd_lock); @@ -320,6 +326,9 @@ static int ionic_set_pauseparam(struct net_device *netdev, u32 requested_pause; int err; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + if (pause->autoneg) return -EOPNOTSUPP; @@ -372,6 +381,9 @@ static int ionic_set_fecparam(struct net_device *netdev, u8 fec_type; int ret = 0; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + if 
(lif->ionic->idev.port_info->config.an_enable) { netdev_err(netdev, "FEC request not allowed while autoneg is enabled\n"); return -EINVAL; @@ -408,7 +420,9 @@ static int ionic_set_fecparam(struct net_device *netdev, } static int ionic_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ionic_lif *lif = netdev_priv(netdev); @@ -426,7 +440,9 @@ static int ionic_get_coalesce(struct net_device *netdev, } static int ionic_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic_identity *ident; @@ -528,6 +544,9 @@ static int ionic_set_ringparam(struct net_device *netdev, struct ionic_queue_params qparam; int err; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + ionic_init_queue_params(lif, &qparam); if (ring->rx_mini_pending || ring->rx_jumbo_pending) { @@ -597,6 +616,9 @@ static int ionic_set_channels(struct net_device *netdev, int max_cnt; int err; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + ionic_init_queue_params(lif, &qparam); if (ch->rx_count != ch->tx_count) { @@ -947,6 +969,9 @@ static int ionic_nway_reset(struct net_device *netdev) struct ionic *ionic = lif->ionic; int err = 0; + if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) + return -EBUSY; + /* flap the link to force auto-negotiation */ mutex_lock(&ionic->dev_cmd_lock); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h index 0478b48d9895..278610ed7227 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_if.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h @@ -2936,6 +2936,8 @@ struct ionic_hwstamp_regs { * @asic_type: Asic type * @asic_rev: Asic revision * @fw_status: Firmware status + * bit 0 - 1 = fw running + * bit 4-7 - 4 bit generation number, changes on fw restart * @fw_heartbeat: Firmware heartbeat counter * @serial_num: Serial number * @fw_version: Firmware version @@ -2949,7 +2951,8 @@ union ionic_dev_info_regs { u8 version; u8 asic_type; u8 asic_rev; -#define IONIC_FW_STS_F_RUNNING 0x1 +#define IONIC_FW_STS_F_RUNNING 0x01 +#define IONIC_FW_STS_F_GENERATION 0xF0 u8 fw_status; u32 fw_heartbeat; char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN]; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index e795fa63ca12..23c9e196a784 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -11,6 +11,7 @@ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/cpumask.h> +#include <linux/crash_dump.h> #include "ionic.h" #include "ionic_bus.h" @@ -29,9 +30,6 @@ static const u8 ionic_qtype_versions[IONIC_QTYPE_MAX] = { */ }; -static void ionic_lif_rx_mode(struct ionic_lif *lif); -static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); -static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); static void ionic_link_status_check(struct ionic_lif *lif); static void ionic_lif_handle_fw_down(struct ionic_lif *lif); static void ionic_lif_handle_fw_up(struct ionic_lif *lif); @@ -91,20 +89,21 @@ static void ionic_lif_deferred_work(struct work_struct *work) case IONIC_DW_TYPE_RX_MODE: ionic_lif_rx_mode(lif); break; - case 
IONIC_DW_TYPE_RX_ADDR_ADD: - ionic_lif_addr_add(lif, w->addr); - break; - case IONIC_DW_TYPE_RX_ADDR_DEL: - ionic_lif_addr_del(lif, w->addr); - break; case IONIC_DW_TYPE_LINK_STATUS: ionic_link_status_check(lif); break; case IONIC_DW_TYPE_LIF_RESET: - if (w->fw_status) + if (w->fw_status) { ionic_lif_handle_fw_up(lif); - else + } else { ionic_lif_handle_fw_down(lif); + + /* Fire off another watchdog to see + * if the FW is already back rather than + * waiting another whole cycle + */ + mod_timer(&lif->ionic->watchdog_timer, jiffies + 1); + } break; default: break; @@ -850,10 +849,8 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) u64 features; int err; - mutex_lock(&lif->queue_lock); - if (lif->hwstamp_txq) - goto out; + return 0; features = IONIC_Q_F_2X_CQ_DESC | IONIC_TXQ_F_HWSTAMP; @@ -895,9 +892,6 @@ int ionic_lif_create_hwstamp_txq(struct ionic_lif *lif) } } -out: - mutex_unlock(&lif->queue_lock); - return 0; err_qcq_enable: @@ -908,7 +902,6 @@ err_qcq_init: ionic_qcq_free(lif, txq); devm_kfree(lif->ionic->dev, txq); err_qcq_alloc: - mutex_unlock(&lif->queue_lock); return err; } @@ -920,10 +913,8 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) u64 features; int err; - mutex_lock(&lif->queue_lock); - if (lif->hwstamp_rxq) - goto out; + return 0; features = IONIC_Q_F_2X_CQ_DESC | IONIC_RXQ_F_HWSTAMP; @@ -961,9 +952,6 @@ int ionic_lif_create_hwstamp_rxq(struct ionic_lif *lif) } } -out: - mutex_unlock(&lif->queue_lock); - return 0; err_qcq_enable: @@ -974,7 +962,6 @@ err_qcq_init: ionic_qcq_free(lif, rxq); devm_kfree(lif->ionic->dev, rxq); err_qcq_alloc: - mutex_unlock(&lif->queue_lock); return err; } @@ -1077,7 +1064,11 @@ static int ionic_lif_add_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) if (err && err != -EEXIST) return err; - return ionic_rx_filter_save(lif, 0, qid, 0, &ctx); + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, qid, 0, &ctx, IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class) @@ -1250,7 +1241,7 @@ void ionic_get_stats64(struct net_device *netdev, ns->tx_errors = ns->tx_aborted_errors; } -static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) { struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -1260,27 +1251,83 @@ static int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr) .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), }, }; + int nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); + bool mc = is_multicast_ether_addr(addr); struct ionic_rx_filter *f; - int err; + int err = 0; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - /* don't bother if we already have it */ spin_lock_bh(&lif->rx_filters.lock); f = ionic_rx_filter_by_addr(lif, addr); + if (f) { + /* don't bother if we already have it and it is sync'd */ + if (f->state == IONIC_FILTER_STATE_SYNCED) { + spin_unlock_bh(&lif->rx_filters.lock); + return 0; + } + + /* mark preemptively as sync'd to block any parallel attempts */ + f->state = IONIC_FILTER_STATE_SYNCED; + } else { + /* save as SYNCED to catch any DEL requests while processing */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } spin_unlock_bh(&lif->rx_filters.lock); - if (f) - return 0; + if (err) + return err; netdev_dbg(lif->netdev, "rx_filter add ADDR %pM\n", addr); - 
memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; + /* Don't bother with the write to FW if we know there's no room, + * we can try again on the next sync attempt. + */ + if ((lif->nucast + lif->nmcast) >= nfilters) + err = -ENOSPC; + else + err = ionic_adminq_post_wait(lif, &ctx); + + spin_lock_bh(&lif->rx_filters.lock); + if (err && err != -EEXIST) { + /* set the state back to NEW so we can try again later */ + f = ionic_rx_filter_by_addr(lif, addr); + if (f && f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_NEW; + + spin_unlock_bh(&lif->rx_filters.lock); + + if (err == -ENOSPC) + return 0; + else + return err; + } - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); + if (mc) + lif->nmcast++; + else + lif->nucast++; + + f = ionic_rx_filter_by_addr(lif, addr); + if (f && f->state == IONIC_FILTER_STATE_OLD) { + /* Someone requested a delete while we were adding + * so update the filter info with the results from the add + * and the data will be there for the delete on the next + * sync cycle. + */ + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_OLD); + } else { + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + } + + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } -static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) { struct ionic_admin_ctx ctx = { .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), @@ -1290,6 +1337,7 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) }, }; struct ionic_rx_filter *f; + int state; int err; spin_lock_bh(&lif->rx_filters.lock); @@ -1302,65 +1350,37 @@ static int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr) netdev_dbg(lif->netdev, "rx_filter del ADDR %pM (id %d)\n", addr, f->filter_id); + state = f->state; ctx.cmd.rx_filter_del.filter_id = cpu_to_le32(f->filter_id); ionic_rx_filter_free(lif, f); - spin_unlock_bh(&lif->rx_filters.lock); - - err = ionic_adminq_post_wait(lif, &ctx); - if (err && err != -EEXIST) - return err; - return 0; -} + if (is_multicast_ether_addr(addr) && lif->nmcast) + lif->nmcast--; + else if (!is_multicast_ether_addr(addr) && lif->nucast) + lif->nucast--; -static int ionic_lif_addr(struct ionic_lif *lif, const u8 *addr, bool add) -{ - unsigned int nmfilters; - unsigned int nufilters; + spin_unlock_bh(&lif->rx_filters.lock); - if (add) { - /* Do we have space for this filter? We test the counters - * here before checking the need for deferral so that we - * can return an overflow error to the stack. - */ - nmfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); - nufilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - - if ((is_multicast_ether_addr(addr) && lif->nmcast < nmfilters)) - lif->nmcast++; - else if (!is_multicast_ether_addr(addr) && - lif->nucast < nufilters) - lif->nucast++; - else - return -ENOSPC; - } else { - if (is_multicast_ether_addr(addr) && lif->nmcast) - lif->nmcast--; - else if (!is_multicast_ether_addr(addr) && lif->nucast) - lif->nucast--; + if (state != IONIC_FILTER_STATE_NEW) { + err = ionic_adminq_post_wait(lif, &ctx); + if (err && err != -EEXIST) + return err; } - netdev_dbg(lif->netdev, "rx_filter %s %pM\n", - add ? 
"add" : "del", addr); - if (add) - return ionic_lif_addr_add(lif, addr); - else - return ionic_lif_addr_del(lif, addr); - return 0; } static int ionic_addr_add(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, ADD_ADDR); + return ionic_lif_list_addr(netdev_priv(netdev), addr, ADD_ADDR); } static int ionic_addr_del(struct net_device *netdev, const u8 *addr) { - return ionic_lif_addr(netdev_priv(netdev), addr, DEL_ADDR); + return ionic_lif_list_addr(netdev_priv(netdev), addr, DEL_ADDR); } -static void ionic_lif_rx_mode(struct ionic_lif *lif) +void ionic_lif_rx_mode(struct ionic_lif *lif) { struct net_device *netdev = lif->netdev; unsigned int nfilters; @@ -1381,32 +1401,26 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif) rx_mode |= (nd_flags & IFF_PROMISC) ? IONIC_RX_MODE_F_PROMISC : 0; rx_mode |= (nd_flags & IFF_ALLMULTI) ? IONIC_RX_MODE_F_ALLMULTI : 0; - /* sync unicast addresses - * next check to see if we're in an overflow state + /* sync the mac filters */ + ionic_rx_filter_sync(lif); + + /* check for overflow state * if so, we track that we overflowed and enable NIC PROMISC * else if the overflow is set and not needed * we remove our overflow flag and check the netdev flags * to see if we can disable NIC PROMISC */ - __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); nfilters = le32_to_cpu(lif->identity->eth.max_ucast_filters); - if (netdev_uc_count(netdev) + 1 > nfilters) { + if ((lif->nucast + lif->nmcast) >= nfilters) { rx_mode |= IONIC_RX_MODE_F_PROMISC; + rx_mode |= IONIC_RX_MODE_F_ALLMULTI; lif->uc_overflow = true; + lif->mc_overflow = true; } else if (lif->uc_overflow) { lif->uc_overflow = false; + lif->mc_overflow = false; if (!(nd_flags & IFF_PROMISC)) rx_mode &= ~IONIC_RX_MODE_F_PROMISC; - } - - /* same for multicast */ - __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); - nfilters = le32_to_cpu(lif->identity->eth.max_mcast_filters); - if (netdev_mc_count(netdev) > nfilters) { - rx_mode |= IONIC_RX_MODE_F_ALLMULTI; - lif->mc_overflow = true; - } else if (lif->mc_overflow) { - lif->mc_overflow = false; if (!(nd_flags & IFF_ALLMULTI)) rx_mode &= ~IONIC_RX_MODE_F_ALLMULTI; } @@ -1449,28 +1463,26 @@ static void ionic_lif_rx_mode(struct ionic_lif *lif) mutex_unlock(&lif->config_lock); } -static void ionic_set_rx_mode(struct net_device *netdev, bool can_sleep) +static void ionic_ndo_set_rx_mode(struct net_device *netdev) { struct ionic_lif *lif = netdev_priv(netdev); struct ionic_deferred_work *work; - if (!can_sleep) { - work = kzalloc(sizeof(*work), GFP_ATOMIC); - if (!work) { - netdev_err(lif->netdev, "rxmode change dropped\n"); - return; - } - work->type = IONIC_DW_TYPE_RX_MODE; - netdev_dbg(lif->netdev, "deferred: rx_mode\n"); - ionic_lif_deferred_enqueue(&lif->deferred, work); - } else { - ionic_lif_rx_mode(lif); - } -} + /* Sync the kernel filter list with the driver filter list */ + __dev_uc_sync(netdev, ionic_addr_add, ionic_addr_del); + __dev_mc_sync(netdev, ionic_addr_add, ionic_addr_del); -static void ionic_ndo_set_rx_mode(struct net_device *netdev) -{ - ionic_set_rx_mode(netdev, CAN_NOT_SLEEP); + /* Shove off the rest of the rxmode work to the work task + * which will include syncing the filters to the firmware. 
+ */ + work = kzalloc(sizeof(*work), GFP_ATOMIC); + if (!work) { + netdev_err(lif->netdev, "rxmode change dropped\n"); + return; + } + work->type = IONIC_DW_TYPE_RX_MODE; + netdev_dbg(lif->netdev, "deferred: rx_mode\n"); + ionic_lif_deferred_enqueue(&lif->deferred, work); } static __le64 ionic_netdev_features_to_nic(netdev_features_t features) @@ -1599,7 +1611,6 @@ static int ionic_init_nic_features(struct ionic_lif *lif) features = NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER | - NETIF_F_RXHASH | NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM | @@ -1607,6 +1618,9 @@ static int ionic_init_nic_features(struct ionic_lif *lif) NETIF_F_TSO6 | NETIF_F_TSO_ECN; + if (lif->nxqs > 1) + features |= NETIF_F_RXHASH; + err = ionic_set_nic_features(lif, features); if (err) return err; @@ -1689,13 +1703,13 @@ static int ionic_set_mac_address(struct net_device *netdev, void *sa) if (!is_zero_ether_addr(netdev->dev_addr)) { netdev_info(netdev, "deleting mac addr %pM\n", netdev->dev_addr); - ionic_addr_del(netdev, netdev->dev_addr); + ionic_lif_addr_del(netdev_priv(netdev), netdev->dev_addr); } eth_commit_mac_addr_change(netdev, addr); netdev_info(netdev, "updating mac addr %pM\n", mac); - return ionic_addr_add(netdev, mac); + return ionic_lif_addr_add(netdev_priv(netdev), mac); } static void ionic_stop_queues_reconfig(struct ionic_lif *lif) @@ -1801,7 +1815,12 @@ static int ionic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, if (err) return err; - return ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx); + spin_lock_bh(&lif->rx_filters.lock); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_SYNCED); + spin_unlock_bh(&lif->rx_filters.lock); + + return err; } static int ionic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, @@ -2104,7 +2123,7 @@ static int ionic_txrx_init(struct ionic_lif *lif) if (lif->netdev->features & NETIF_F_RXHASH) ionic_lif_rss_init(lif); - ionic_set_rx_mode(lif->netdev, CAN_SLEEP); + ionic_lif_rx_mode(lif); return 0; @@ -2202,9 +2221,11 @@ static int ionic_open(struct net_device *netdev) if (test_and_clear_bit(IONIC_LIF_F_BROKEN, lif->state)) netdev_info(netdev, "clearing broken state\n"); + mutex_lock(&lif->queue_lock); + err = ionic_txrx_alloc(lif); if (err) - return err; + goto err_unlock; err = ionic_txrx_init(lif); if (err) @@ -2225,12 +2246,21 @@ static int ionic_open(struct net_device *netdev) goto err_txrx_deinit; } + /* If hardware timestamping is enabled, but the queues were freed by + * ionic_stop, those need to be reallocated and initialized, too. 
+ */ + ionic_lif_hwstamp_recreate_queues(lif); + + mutex_unlock(&lif->queue_lock); + return 0; err_txrx_deinit: ionic_txrx_deinit(lif); err_txrx_free: ionic_txrx_free(lif); +err_unlock: + mutex_unlock(&lif->queue_lock); return err; } @@ -2250,14 +2280,16 @@ static int ionic_stop(struct net_device *netdev) if (test_bit(IONIC_LIF_F_FW_RESET, lif->state)) return 0; + mutex_lock(&lif->queue_lock); ionic_stop_queues(lif); ionic_txrx_deinit(lif); ionic_txrx_free(lif); + mutex_unlock(&lif->queue_lock); return 0; } -static int ionic_do_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) +static int ionic_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct ionic_lif *lif = netdev_priv(netdev); @@ -2519,7 +2551,7 @@ static int ionic_set_vf_link_state(struct net_device *netdev, int vf, int set) static const struct net_device_ops ionic_netdev_ops = { .ndo_open = ionic_open, .ndo_stop = ionic_stop, - .ndo_do_ioctl = ionic_do_ioctl, + .ndo_eth_ioctl = ionic_eth_ioctl, .ndo_start_xmit = ionic_start_xmit, .ndo_get_stats64 = ionic_get_stats64, .ndo_set_rx_mode = ionic_ndo_set_rx_mode, @@ -2580,22 +2612,26 @@ int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_qcq **tx_qcqs = NULL; struct ionic_qcq **rx_qcqs = NULL; unsigned int flags, i; - int err = -ENOMEM; + int err = 0; /* allocate temporary qcq arrays to hold new queue structs */ if (qparam->nxqs != lif->nxqs || qparam->ntxq_descs != lif->ntxq_descs) { tx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->ntxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); - if (!tx_qcqs) + if (!tx_qcqs) { + err = -ENOMEM; goto err_out; + } } if (qparam->nxqs != lif->nxqs || qparam->nrxq_descs != lif->nrxq_descs || qparam->rxq_features != lif->rxq_features) { rx_qcqs = devm_kcalloc(lif->ionic->dev, lif->ionic->nrxqs_per_lif, sizeof(struct ionic_qcq *), GFP_KERNEL); - if (!rx_qcqs) + if (!rx_qcqs) { + err = -ENOMEM; goto err_out; + } } /* allocate new desc_info and rings, but leave the interrupt setup @@ -2774,6 +2810,9 @@ err_out: ionic_qcq_free(lif, lif->rxqcqs[i]); } + if (err) + netdev_info(lif->netdev, "%s: failed %d\n", __func__, err); + return err; } @@ -2827,8 +2866,14 @@ int ionic_lif_alloc(struct ionic *ionic) lif->ionic = ionic; lif->index = 0; - lif->ntxq_descs = IONIC_DEF_TXRX_DESC; - lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + + if (is_kdump_kernel()) { + lif->ntxq_descs = IONIC_MIN_TXRX_DESC; + lif->nrxq_descs = IONIC_MIN_TXRX_DESC; + } else { + lif->ntxq_descs = IONIC_DEF_TXRX_DESC; + lif->nrxq_descs = IONIC_DEF_TXRX_DESC; + } /* Convert the default coalesce value to actual hw resolution */ lif->rx_coalesce_usecs = IONIC_ITR_COAL_USEC_DEFAULT; @@ -3179,7 +3224,7 @@ static int ionic_station_set(struct ionic_lif *lif) */ if (!ether_addr_equal(ctx.comp.lif_getattr.mac, netdev->dev_addr)) - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); + ionic_lif_addr_add(lif, netdev->dev_addr); } else { /* Update the netdev mac with the device's mac */ memcpy(addr.sa_data, ctx.comp.lif_getattr.mac, netdev->addr_len); @@ -3196,7 +3241,7 @@ static int ionic_station_set(struct ionic_lif *lif) netdev_dbg(lif->netdev, "adding station MAC addr %pM\n", netdev->dev_addr); - ionic_lif_addr(lif, netdev->dev_addr, ADD_ADDR); + ionic_lif_addr_add(lif, netdev->dev_addr); return 0; } @@ -3514,6 +3559,7 @@ int ionic_lif_size(struct ionic *ionic) unsigned int min_intrs; int err; + /* retrieve basic values from FW */ lc = &ident->lif.eth.config; dev_nintrs = le32_to_cpu(ident->dev.nintrs); neqs_per_lif = 
le32_to_cpu(ident->lif.rdma.eq_qtype.qid_count); @@ -3521,6 +3567,15 @@ int ionic_lif_size(struct ionic *ionic) ntxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_TXQ]); nrxqs_per_lif = le32_to_cpu(lc->queue_count[IONIC_QTYPE_RXQ]); + /* limit values to play nice with kdump */ + if (is_kdump_kernel()) { + dev_nintrs = 2; + neqs_per_lif = 0; + nnqs_per_lif = 0; + ntxqs_per_lif = 1; + nrxqs_per_lif = 1; + } + /* reserve last queue id for hardware timestamping */ if (lc->features & cpu_to_le64(IONIC_ETH_HW_TIMESTAMP)) { if (ntxqs_per_lif <= 1 || nrxqs_per_lif <= 1) { diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h index 69ab59fedb6c..4915184f3efb 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h @@ -98,8 +98,6 @@ struct ionic_qcq { enum ionic_deferred_work_type { IONIC_DW_TYPE_RX_MODE, - IONIC_DW_TYPE_RX_ADDR_ADD, - IONIC_DW_TYPE_RX_ADDR_DEL, IONIC_DW_TYPE_LINK_STATUS, IONIC_DW_TYPE_LIF_RESET, }; @@ -147,6 +145,7 @@ enum ionic_lif_state_flags { IONIC_LIF_F_SW_DEBUG_STATS, IONIC_LIF_F_UP, IONIC_LIF_F_LINK_CHECK_REQUESTED, + IONIC_LIF_F_FILTER_SYNC_NEEDED, IONIC_LIF_F_FW_RESET, IONIC_LIF_F_SPLIT_INTR, IONIC_LIF_F_BROKEN, @@ -295,6 +294,10 @@ int ionic_lif_alloc(struct ionic *ionic); int ionic_lif_init(struct ionic_lif *lif); void ionic_lif_free(struct ionic_lif *lif); void ionic_lif_deinit(struct ionic_lif *lif); + +int ionic_lif_addr_add(struct ionic_lif *lif, const u8 *addr); +int ionic_lif_addr_del(struct ionic_lif *lif, const u8 *addr); + int ionic_lif_register(struct ionic_lif *lif); void ionic_lif_unregister(struct ionic_lif *lif); int ionic_lif_identify(struct ionic *ionic, u8 lif_type, @@ -303,6 +306,7 @@ int ionic_lif_size(struct ionic *ionic); #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void ionic_lif_hwstamp_replay(struct ionic_lif *lif); +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif); int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr); int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr); ktime_t ionic_lif_phc_ktime(struct ionic_lif *lif, u64 counter); @@ -312,6 +316,7 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif); void ionic_lif_free_phc(struct ionic_lif *lif); #else static inline void ionic_lif_hwstamp_replay(struct ionic_lif *lif) {} +static inline void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) {} static inline int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) { @@ -342,6 +347,7 @@ int ionic_lif_set_hwstamp_rxfilt(struct ionic_lif *lif, u64 pkt_class); int ionic_lif_rss_config(struct ionic_lif *lif, u16 types, const u8 *key, const u32 *indir); +void ionic_lif_rx_mode(struct ionic_lif *lif); int ionic_reconfigure_queues(struct ionic_lif *lif, struct ionic_queue_params *qparam); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 61cfe2120817..6f07bf509efe 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -375,8 +375,8 @@ try_again: * heartbeat check but is still alive and will process this * request, so don't clean the dev_cmd in this case. 
*/ - dev_warn(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n", - ionic_opcode_to_str(opcode), opcode); + dev_dbg(ionic->dev, "DEVCMD %s (%d) failed - FW halted\n", + ionic_opcode_to_str(opcode), opcode); return -ENXIO; } @@ -450,6 +450,8 @@ int ionic_identify(struct ionic *ionic) } mutex_unlock(&ionic->dev_cmd_lock); + dev_info(ionic->dev, "FW: %s\n", idev->dev_info.fw_version); + if (err) { dev_err(ionic->dev, "Cannot identify ionic: %dn", err); goto err_out; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c index 6e2403c71608..eed2db69d708 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c @@ -119,8 +119,8 @@ static int ionic_lif_hwstamp_set_ts_config(struct ionic_lif *lif, config->rx_filter = HWTSTAMP_FILTER_ALL; } - dev_dbg(ionic->dev, "config_rx_filter %d rx_filt %#llx rx_all %d\n", - config->rx_filter, rx_filt, rx_all); + dev_dbg(ionic->dev, "%s: config_rx_filter %d rx_filt %#llx rx_all %d\n", + __func__, config->rx_filter, rx_filt, rx_all); if (tx_mode) { err = ionic_lif_create_hwstamp_txq(lif); @@ -194,7 +194,9 @@ int ionic_lif_hwstamp_set(struct ionic_lif *lif, struct ifreq *ifr) if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) return -EFAULT; + mutex_lock(&lif->queue_lock); err = ionic_lif_hwstamp_set_ts_config(lif, &config); + mutex_unlock(&lif->queue_lock); if (err) { netdev_info(lif->netdev, "hwstamp set failed: %d\n", err); return err; @@ -213,11 +215,37 @@ void ionic_lif_hwstamp_replay(struct ionic_lif *lif) if (!lif->phc || !lif->phc->ptp) return; + mutex_lock(&lif->queue_lock); err = ionic_lif_hwstamp_set_ts_config(lif, NULL); + mutex_unlock(&lif->queue_lock); if (err) netdev_info(lif->netdev, "hwstamp replay failed: %d\n", err); } +void ionic_lif_hwstamp_recreate_queues(struct ionic_lif *lif) +{ + int err; + + if (!lif->phc || !lif->phc->ptp) + return; + + mutex_lock(&lif->phc->config_lock); + + if (lif->phc->ts_config_tx_mode) { + err = ionic_lif_create_hwstamp_txq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate txq failed: %d\n", err); + } + + if (lif->phc->ts_config_rx_filt) { + err = ionic_lif_create_hwstamp_rxq(lif); + if (err) + netdev_info(lif->netdev, "hwstamp recreate rxq failed: %d\n", err); + } + + mutex_unlock(&lif->phc->config_lock); +} + int ionic_lif_hwstamp_get(struct ionic_lif *lif, struct ifreq *ifr) { struct hwtstamp_config config; diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c index d71316d9ded2..7e3a5634c161 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.c @@ -4,6 +4,7 @@ #include <linux/netdevice.h> #include <linux/dynamic_debug.h> #include <linux/etherdevice.h> +#include <linux/list.h> #include "ionic.h" #include "ionic_lif.h" @@ -120,11 +121,12 @@ void ionic_rx_filters_deinit(struct ionic_lif *lif) } int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, - u32 hash, struct ionic_admin_ctx *ctx) + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state) { struct device *dev = lif->ionic->dev; struct ionic_rx_filter_add_cmd *ac; - struct ionic_rx_filter *f; + struct ionic_rx_filter *f = NULL; struct hlist_head *head; unsigned int key; @@ -133,9 +135,11 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, switch (le16_to_cpu(ac->match)) { case IONIC_RX_FILTER_MATCH_VLAN: key = 
le16_to_cpu(ac->vlan.vlan); + f = ionic_rx_filter_by_vlan(lif, le16_to_cpu(ac->vlan.vlan)); break; case IONIC_RX_FILTER_MATCH_MAC: key = *(u32 *)ac->mac.addr; + f = ionic_rx_filter_by_addr(lif, ac->mac.addr); break; case IONIC_RX_FILTER_MATCH_MAC_VLAN: key = le16_to_cpu(ac->mac_vlan.vlan); @@ -147,12 +151,19 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, return -EINVAL; } - f = devm_kzalloc(dev, sizeof(*f), GFP_KERNEL); - if (!f) - return -ENOMEM; + if (f) { + /* remove from current linking so we can refresh it */ + hlist_del(&f->by_id); + hlist_del(&f->by_hash); + } else { + f = devm_kzalloc(dev, sizeof(*f), GFP_ATOMIC); + if (!f) + return -ENOMEM; + } f->flow_id = flow_id; f->filter_id = le32_to_cpu(ctx->comp.rx_filter_add.filter_id); + f->state = state; f->rxq_index = rxq_index; memcpy(&f->cmd, ac, sizeof(f->cmd)); netdev_dbg(lif->netdev, "rx_filter add filter_id %d\n", f->filter_id); @@ -160,8 +171,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, INIT_HLIST_NODE(&f->by_hash); INIT_HLIST_NODE(&f->by_id); - spin_lock_bh(&lif->rx_filters.lock); - key = hash_32(key, IONIC_RX_FILTER_HASH_BITS); head = &lif->rx_filters.by_hash[key]; hlist_add_head(&f->by_hash, head); @@ -170,8 +179,6 @@ int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, head = &lif->rx_filters.by_id[key]; hlist_add_head(&f->by_id, head); - spin_unlock_bh(&lif->rx_filters.lock); - return 0; } @@ -231,3 +238,121 @@ struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif) return NULL; } + +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode) +{ + struct ionic_rx_filter *f; + int err; + + spin_lock_bh(&lif->rx_filters.lock); + + f = ionic_rx_filter_by_addr(lif, addr); + if (mode == ADD_ADDR && !f) { + struct ionic_admin_ctx ctx = { + .work = COMPLETION_INITIALIZER_ONSTACK(ctx.work), + .cmd.rx_filter_add = { + .opcode = IONIC_CMD_RX_FILTER_ADD, + .lif_index = cpu_to_le16(lif->index), + .match = cpu_to_le16(IONIC_RX_FILTER_MATCH_MAC), + }, + }; + + memcpy(ctx.cmd.rx_filter_add.mac.addr, addr, ETH_ALEN); + err = ionic_rx_filter_save(lif, 0, IONIC_RXQ_INDEX_ANY, 0, &ctx, + IONIC_FILTER_STATE_NEW); + if (err) { + spin_unlock_bh(&lif->rx_filters.lock); + return err; + } + + } else if (mode == ADD_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_OLD) + f->state = IONIC_FILTER_STATE_SYNCED; + + } else if (mode == DEL_ADDR && f) { + if (f->state == IONIC_FILTER_STATE_NEW) + ionic_rx_filter_free(lif, f); + else if (f->state == IONIC_FILTER_STATE_SYNCED) + f->state = IONIC_FILTER_STATE_OLD; + } else if (mode == DEL_ADDR && !f) { + spin_unlock_bh(&lif->rx_filters.lock); + return -ENOENT; + } + + spin_unlock_bh(&lif->rx_filters.lock); + + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + return 0; +} + +struct sync_item { + struct list_head list; + struct ionic_rx_filter f; +}; + +void ionic_rx_filter_sync(struct ionic_lif *lif) +{ + struct device *dev = lif->ionic->dev; + struct list_head sync_add_list; + struct list_head sync_del_list; + struct sync_item *sync_item; + struct ionic_rx_filter *f; + struct hlist_head *head; + struct hlist_node *tmp; + struct sync_item *spos; + unsigned int i; + + INIT_LIST_HEAD(&sync_add_list); + INIT_LIST_HEAD(&sync_del_list); + + clear_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + /* Copy the filters to be added and deleted + * into a separate local list that needs no locking. 
+ */ + spin_lock_bh(&lif->rx_filters.lock); + for (i = 0; i < IONIC_RX_FILTER_HLISTS; i++) { + head = &lif->rx_filters.by_id[i]; + hlist_for_each_entry_safe(f, tmp, head, by_id) { + if (f->state == IONIC_FILTER_STATE_NEW || + f->state == IONIC_FILTER_STATE_OLD) { + sync_item = devm_kzalloc(dev, sizeof(*sync_item), + GFP_KERNEL); + if (!sync_item) + goto loop_out; + + sync_item->f = *f; + + if (f->state == IONIC_FILTER_STATE_NEW) + list_add(&sync_item->list, &sync_add_list); + else + list_add(&sync_item->list, &sync_del_list); + } + } + } +loop_out: + spin_unlock_bh(&lif->rx_filters.lock); + + /* If the add or delete fails, it won't get marked as sync'd + * and will be tried again in the next sync action. + * Do the deletes first in case we're in an overflow state and + * they can clear room for some new filters + */ + list_for_each_entry_safe(sync_item, spos, &sync_del_list, list) { + (void)ionic_lif_addr_del(lif, sync_item->f.cmd.mac.addr); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } + + list_for_each_entry_safe(sync_item, spos, &sync_add_list, list) { + (void)ionic_lif_addr_add(lif, sync_item->f.cmd.mac.addr); + + if (sync_item->f.state != IONIC_FILTER_STATE_SYNCED) + set_bit(IONIC_LIF_F_FILTER_SYNC_NEEDED, lif->state); + + list_del(&sync_item->list); + devm_kfree(dev, sync_item); + } +} diff --git a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h index 1ead48be3c83..a66e35f0833b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h +++ b/drivers/net/ethernet/pensando/ionic/ionic_rx_filter.h @@ -5,10 +5,18 @@ #define _IONIC_RX_FILTER_H_ #define IONIC_RXQ_INDEX_ANY (0xFFFF) + +enum ionic_filter_state { + IONIC_FILTER_STATE_SYNCED, + IONIC_FILTER_STATE_NEW, + IONIC_FILTER_STATE_OLD, +}; + struct ionic_rx_filter { u32 flow_id; u32 filter_id; u16 rxq_index; + enum ionic_filter_state state; struct ionic_rx_filter_add_cmd cmd; struct hlist_node by_hash; struct hlist_node by_id; @@ -28,9 +36,13 @@ void ionic_rx_filter_replay(struct ionic_lif *lif); int ionic_rx_filters_init(struct ionic_lif *lif); void ionic_rx_filters_deinit(struct ionic_lif *lif); int ionic_rx_filter_save(struct ionic_lif *lif, u32 flow_id, u16 rxq_index, - u32 hash, struct ionic_admin_ctx *ctx); + u32 hash, struct ionic_admin_ctx *ctx, + enum ionic_filter_state state); struct ionic_rx_filter *ionic_rx_filter_by_vlan(struct ionic_lif *lif, u16 vid); struct ionic_rx_filter *ionic_rx_filter_by_addr(struct ionic_lif *lif, const u8 *addr); struct ionic_rx_filter *ionic_rx_filter_rxsteer(struct ionic_lif *lif); +void ionic_rx_filter_sync(struct ionic_lif *lif); +int ionic_lif_list_addr(struct ionic_lif *lif, const u8 *addr, bool mode); +int ionic_rx_filters_need_sync(struct ionic_lif *lif); #endif /* _IONIC_RX_FILTER_H_ */ diff --git a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c index 08870190e4d2..37c39581b659 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_txrx.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_txrx.c @@ -32,19 +32,13 @@ static inline struct netdev_queue *q_to_ndq(struct ionic_queue *q) return netdev_get_tx_queue(q->lif->netdev, q->index); } -static void ionic_rx_buf_reset(struct ionic_buf_info *buf_info) -{ - buf_info->page = NULL; - buf_info->page_offset = 0; - buf_info->dma_addr = 0; -} - static int ionic_rx_page_alloc(struct ionic_queue *q, struct ionic_buf_info *buf_info) { struct net_device *netdev = q->lif->netdev; struct ionic_rx_stats *stats; 
struct device *dev; + struct page *page; dev = q->dev; stats = q_to_rx_stats(q); @@ -55,26 +49,27 @@ static int ionic_rx_page_alloc(struct ionic_queue *q, return -EINVAL; } - buf_info->page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); - if (unlikely(!buf_info->page)) { + page = alloc_pages(IONIC_PAGE_GFP_MASK, 0); + if (unlikely(!page)) { net_err_ratelimited("%s: %s page alloc failed\n", netdev->name, q->name); stats->alloc_err++; return -ENOMEM; } - buf_info->page_offset = 0; - buf_info->dma_addr = dma_map_page(dev, buf_info->page, buf_info->page_offset, + buf_info->dma_addr = dma_map_page(dev, page, 0, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(dev, buf_info->dma_addr))) { - __free_pages(buf_info->page, 0); - ionic_rx_buf_reset(buf_info); + __free_pages(page, 0); net_err_ratelimited("%s: %s dma map failed\n", netdev->name, q->name); stats->dma_map_err++; return -EIO; } + buf_info->page = page; + buf_info->page_offset = 0; + return 0; } @@ -95,7 +90,7 @@ static void ionic_rx_page_free(struct ionic_queue *q, dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); __free_pages(buf_info->page, 0); - ionic_rx_buf_reset(buf_info); + buf_info->page = NULL; } static bool ionic_rx_buf_recycle(struct ionic_queue *q, @@ -139,7 +134,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, buf_info = &desc_info->bufs[0]; len = le16_to_cpu(comp->len); - prefetch(buf_info->page); + prefetchw(buf_info->page); skb = napi_get_frags(&q_to_qcq(q)->napi); if (unlikely(!skb)) { @@ -170,7 +165,7 @@ static struct sk_buff *ionic_rx_frags(struct ionic_queue *q, if (!ionic_rx_buf_recycle(q, buf_info, frag_len)) { dma_unmap_page(dev, buf_info->dma_addr, IONIC_PAGE_SIZE, DMA_FROM_DEVICE); - ionic_rx_buf_reset(buf_info); + buf_info->page = NULL; } buf_info++; diff --git a/drivers/net/ethernet/qlogic/Kconfig b/drivers/net/ethernet/qlogic/Kconfig index 98f430905ffa..1203353238e5 100644 --- a/drivers/net/ethernet/qlogic/Kconfig +++ b/drivers/net/ethernet/qlogic/Kconfig @@ -99,7 +99,7 @@ config QED_SRIOV config QEDE tristate "QLogic QED 25/40/100Gb Ethernet NIC" depends on QED - imply PTP_1588_CLOCK + depends on PTP_1588_CLOCK_OPTIONAL help This enables the support for Marvell FastLinQ adapters family, ethernet driver. diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h index e5c51256243a..f13fa7396aef 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic.h +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic.h @@ -1863,7 +1863,6 @@ static inline u32 netxen_tx_avail(struct nx_host_tx_ring *tx_ring) int netxen_get_flash_mac_addr(struct netxen_adapter *adapter, u64 *mac); int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, u64 *mac); void netxen_change_ringparam(struct netxen_adapter *adapter); -int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); extern const struct ethtool_ops netxen_nic_ethtool_ops; diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c index dd22cb056d03..a075643f5826 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_ethtool.c @@ -731,7 +731,9 @@ netxen_nic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) * firmware coalescing to default. 
*/ static int netxen_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(netdev); @@ -775,7 +777,9 @@ static int netxen_set_intr_coalesce(struct net_device *netdev, } static int netxen_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netxen_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index b590c70539b5..d58e021614cd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -26,15 +26,6 @@ extern const struct qed_common_ops qed_common_ops_pass; -#define QED_MAJOR_VERSION 8 -#define QED_MINOR_VERSION 37 -#define QED_REVISION_VERSION 0 -#define QED_ENGINEERING_VERSION 20 - -#define QED_VERSION \ - ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \ - (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION) - #define STORM_FW_VERSION \ ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \ (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION) @@ -517,12 +508,6 @@ enum qed_hsi_def_type { QED_NUM_HSI_DEFS }; -#define DRV_MODULE_VERSION \ - __stringify(QED_MAJOR_VERSION) "." \ - __stringify(QED_MINOR_VERSION) "." \ - __stringify(QED_REVISION_VERSION) "." \ - __stringify(QED_ENGINEERING_VERSION) - struct qed_simd_fp_handler { void *token; void (*func)(void *); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c index e81dd34a3cac..dc93ddea8906 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c @@ -741,7 +741,6 @@ static int qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dcbx_mib_meta_data data; - int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, @@ -750,7 +749,7 @@ qed_dcbx_read_local_lldp_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) data.size = sizeof(struct lldp_config_params_s); qed_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); - return rc; + return 0; } static int @@ -810,7 +809,6 @@ static int qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { struct qed_dcbx_mib_meta_data data; - int rc = 0; memset(&data, 0, sizeof(data)); data.addr = p_hwfn->mcp_info->port_addr + @@ -819,7 +817,7 @@ qed_dcbx_read_local_mib(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) data.size = sizeof(struct dcbx_local_params); qed_memcpy_from(p_hwfn, p_ptt, data.local_admin, data.addr, data.size); - return rc; + return 0; } static int qed_dcbx_read_mib(struct qed_hwfn *p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_devlink.c b/drivers/net/ethernet/qlogic/qed/qed_devlink.c index cf7f4da68e69..78070682f2df 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_devlink.c +++ b/drivers/net/ethernet/qlogic/qed/qed_devlink.c @@ -93,7 +93,7 @@ static const struct devlink_health_reporter_ops qed_fw_fatal_reporter_ops = { .dump = qed_fw_fatal_reporter_dump, }; -#define QED_REPORTER_FW_GRACEFUL_PERIOD 1200000 +#define QED_REPORTER_FW_GRACEFUL_PERIOD 0 void qed_fw_reporters_create(struct devlink *devlink) { @@ -207,14 +207,15 @@ struct devlink *qed_devlink_register(struct qed_dev *cdev) struct 
devlink *dl; int rc; - dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink)); + dl = devlink_alloc(&qed_dl_ops, sizeof(struct qed_devlink), + &cdev->pdev->dev); if (!dl) return ERR_PTR(-ENOMEM); qdevlink = devlink_priv(dl); qdevlink->cdev = cdev; - rc = devlink_register(dl, &cdev->pdev->dev); + rc = devlink_register(dl); if (rc) goto err_free; diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index 578935f643b8..f78e6055f654 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -351,6 +351,9 @@ static int qed_fw_assertion(struct qed_hwfn *p_hwfn) qed_hw_err_notify(p_hwfn, p_hwfn->p_dpc_ptt, QED_HW_ERR_FW_ASSERT, "FW assertion!\n"); + /* Clear assert indications */ + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, MISC_REG_AEU_GENERAL_ATTN_32, 0); + return -EINVAL; } @@ -464,12 +467,19 @@ static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) u32 int_sts, first_drop_reason, details, address, all_drops_reason; struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + if (int_sts == 0xdeadbeaf) { + DP_NOTICE(p_hwfn->cdev, + "DORQ is being reset, skipping int_sts handler\n"); + + return 0; + } + /* int_sts may be zero since all PFs were interrupted for doorbell * overflow but another one already handled it. Can abort here. If * This PF also requires overflow recovery we will be interrupted again. * The masked almost full indication may also be set. Ignoring. */ - int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) return 0; @@ -528,6 +538,9 @@ static int qed_dorq_attn_int_sts(struct qed_hwfn *p_hwfn) static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) { + if (p_hwfn->cdev->recov_in_prog) + return 0; + p_hwfn->db_recovery_info.dorq_attn = true; qed_dorq_attn_overflow(p_hwfn); @@ -943,6 +956,13 @@ qed_int_deassertion_aeu_bit(struct qed_hwfn *p_hwfn, DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", p_bit_name); + /* Re-enable FW aassertion (Gen 32) interrupts */ + val = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE4_IGU_OUT_0); + val |= MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32; + qed_wr(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE4_IGU_OUT_0, val); + out: return rc; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c index a99861124630..fc8b3e64f153 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iwarp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iwarp.c @@ -1624,8 +1624,6 @@ qed_iwarp_get_listener(struct qed_hwfn *p_hwfn, static const u32 ip_zero[4] = { 0, 0, 0, 0 }; bool found = false; - qed_iwarp_print_cm_info(p_hwfn, cm_info); - list_for_each_entry(listener, &p_hwfn->p_rdma_info->iwarp.listen_list, list_entry) { diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index 5bd58c65e163..15ef59aa34ff 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -49,11 +49,10 @@ #define QED_NVM_CFG_MAX_ATTRS 50 static char version[] = - "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n"; + "QLogic FastLinQ 4xxxx Core Module qed\n"; MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); #define FW_FILE_VERSION \ __stringify(FW_MAJOR_VERSION) "." 
\ @@ -616,7 +615,12 @@ static int qed_enable_msix(struct qed_dev *cdev, rc = cnt; } - if (rc > 0) { + /* For VFs, we should return with an error in case we didn't get the + * exact number of msix vectors as we requested. + * Not doing that will lead to a crash when starting queues for + * this VF. + */ + if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) { /* MSI-x configuration was achieved */ int_params->out.int_mode = QED_INT_MODE_MSIX; int_params->out.num_vectors = rc; @@ -1216,6 +1220,10 @@ static void qed_slowpath_task(struct work_struct *work) if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, &hwfn->slowpath_task_flags)) { + /* skip qed_db_rec_handler during recovery/unload */ + if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active) + goto out; + qed_db_rec_handler(hwfn, ptt); if (hwfn->periodic_db_rec_count--) qed_slowpath_delayed_work(hwfn, @@ -1223,6 +1231,7 @@ static void qed_slowpath_task(struct work_struct *work) QED_PERIODIC_DB_REC_INTERVAL); } +out: qed_ptt_release(hwfn, ptt); } diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index 4387292c37e2..6e5a6cc97d0e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -944,7 +944,6 @@ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, memset(&in_params, 0, sizeof(in_params)); in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT; - in_params.drv_ver_0 = QED_VERSION; in_params.drv_ver_1 = qed_get_config_bitmap(); in_params.fw_ver = STORM_FW_VERSION; rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role); diff --git a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c index c1dd71d19f3f..3b84d00cf987 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c +++ b/drivers/net/ethernet/qlogic/qed/qed_nvmetcp_fw_funcs.c @@ -4,7 +4,6 @@ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> -#include <linux/kernel.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/types.h> diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 9db22be42476..da1b7fdcbda7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -504,6 +504,8 @@ 0x180824UL #define MISC_REG_AEU_GENERAL_ATTN_0 \ 0x008400UL +#define MISC_REG_AEU_GENERAL_ATTN_32 \ + 0x008480UL #define MISC_REG_AEU_GENERAL_ATTN_35 \ 0x00848cUL #define CAU_REG_SB_ADDR_MEMORY \ @@ -518,6 +520,12 @@ 0x180804UL #define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ 0x00849cUL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 \ + 0x0084a8UL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32 \ + (0x1UL << 0) +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0_GENERAL_ATTN32_SHIFT \ + 0 #define MISC_REG_AEU_AFTER_INVERT_1_IGU \ 0x0087b4UL #define MISC_REG_AEU_MASK_ATTN_IGU \ diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index 5630008f38b7..f90dcfe9ee68 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -30,15 +30,6 @@ #include <net/pkt_cls.h> #include <net/tc_act/tc_gact.h> -#define QEDE_MAJOR_VERSION 8 -#define QEDE_MINOR_VERSION 37 -#define QEDE_REVISION_VERSION 0 -#define QEDE_ENGINEERING_VERSION 20 -#define DRV_MODULE_VERSION __stringify(QEDE_MAJOR_VERSION) "." \ - __stringify(QEDE_MINOR_VERSION) "." \ - __stringify(QEDE_REVISION_VERSION) "." 
\ - __stringify(QEDE_ENGINEERING_VERSION) - #define DRV_MODULE_SYM qede struct qede_stats_common { @@ -589,7 +580,9 @@ int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto, struct flow_cls_offload *f); void qede_forced_speed_maps_init(void); -int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal); +int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); int qede_set_per_coalesce(struct net_device *dev, u32 queue, struct ethtool_coalesce *coal); diff --git a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 1560ad3d9290..8284c4c1528f 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -625,13 +625,13 @@ static void qede_get_drvinfo(struct net_device *ndev, (edev->dev_info.common.mfw_rev >> 8) & 0xFF, edev->dev_info.common.mfw_rev & 0xFF); - if ((strlen(storm) + strlen(DRV_MODULE_VERSION) + strlen("[storm] ")) < + if ((strlen(storm) + strlen("[storm]")) < sizeof(info->version)) snprintf(info->version, sizeof(info->version), - "%s [storm %s]", DRV_MODULE_VERSION, storm); + "[storm %s]", storm); else snprintf(info->version, sizeof(info->version), - "%s %s", DRV_MODULE_VERSION, storm); + "%s", storm); if (edev->dev_info.common.mbi_version) { snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d", @@ -760,7 +760,9 @@ static int qede_flash_device(struct net_device *dev, } static int qede_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { void *rx_handle = NULL, *tx_handle = NULL; struct qede_dev *edev = netdev_priv(dev); @@ -819,7 +821,9 @@ out: return rc; } -int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal) +int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qede_dev *edev = netdev_priv(dev); struct qede_fastpath *fp; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 7c6064baeba2..9837bdb89cd4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -39,12 +39,8 @@ #include "qede.h" #include "qede_ptp.h" -static char version[] = - "QLogic FastLinQ 4xxxx Ethernet Driver qede " DRV_MODULE_VERSION "\n"; - MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Ethernet Driver"); MODULE_LICENSE("GPL"); -MODULE_VERSION(DRV_MODULE_VERSION); static uint debug; module_param(debug, uint, 0); @@ -258,7 +254,7 @@ int __init qede_init(void) { int ret; - pr_info("qede_init: %s\n", version); + pr_info("qede init: QLogic FastLinQ 4xxxx Ethernet Driver qede\n"); qede_forced_speed_maps_init(); @@ -644,7 +640,7 @@ static const struct net_device_ops qede_netdev_ops = { .ndo_set_mac_address = qede_set_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = qede_change_mtu, - .ndo_do_ioctl = qede_ioctl, + .ndo_eth_ioctl = qede_ioctl, .ndo_tx_timeout = qede_tx_timeout, #ifdef CONFIG_QED_SRIOV .ndo_set_vf_mac = qede_set_vf_mac, @@ -1157,10 +1153,6 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, /* Start the Slowpath-process */ memset(&sp_params, 0, sizeof(sp_params)); sp_params.int_mode = QED_INT_MODE_MSIX; - sp_params.drv_major = QEDE_MAJOR_VERSION; - 
sp_params.drv_minor = QEDE_MINOR_VERSION; - sp_params.drv_rev = QEDE_REVISION_VERSION; - sp_params.drv_eng = QEDE_ENGINEERING_VERSION; strlcpy(sp_params.name, "qede LAN", QED_DRV_VER_STR_SIZE); rc = qed_ops->common->slowpath_start(cdev, &sp_params); if (rc) { @@ -1874,6 +1866,7 @@ static void qede_sync_free_irqs(struct qede_dev *edev) } edev->int_info.used_cnt = 0; + edev->int_info.msix_cnt = 0; } static int qede_req_msix_irqs(struct qede_dev *edev) @@ -1906,6 +1899,12 @@ static int qede_req_msix_irqs(struct qede_dev *edev) &edev->fp_array[i]); if (rc) { DP_ERR(edev, "Request fp %d irq failed\n", i); +#ifdef CONFIG_RFS_ACCEL + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; +#endif qede_sync_free_irqs(edev); return rc; } @@ -2298,6 +2297,15 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, rc = qede_stop_queues(edev); if (rc) { +#ifdef CONFIG_RFS_ACCEL + if (edev->dev_info.common.b_arfs_capable) { + qede_poll_for_freeing_arfs_filters(edev); + if (edev->ndev->rx_cpu_rmap) + free_irq_cpu_rmap(edev->ndev->rx_cpu_rmap); + + edev->ndev->rx_cpu_rmap = NULL; + } +#endif qede_sync_free_irqs(edev); goto out; } @@ -2427,7 +2435,6 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, goto out; err4: qede_sync_free_irqs(edev); - memset(&edev->int_info.msix_cnt, 0, sizeof(struct qed_int_info)); err3: qede_napi_disable_remove(edev); err2: @@ -2628,8 +2635,10 @@ static void qede_generic_hw_err_handler(struct qede_dev *edev) "Generic sleepable HW error handling started - err_flags 0x%lx\n", edev->err_flags); - if (edev->devlink) + if (edev->devlink) { + DP_NOTICE(edev, "Reporting fatal error to devlink\n"); edev->ops->common->report_fatal_error(edev->devlink, edev->last_err_type); + } clear_bit(QEDE_ERR_IS_HANDLED, &edev->err_flags); @@ -2651,6 +2660,8 @@ static void qede_set_hw_err_flags(struct qede_dev *edev, case QED_HW_ERR_FW_ASSERT: set_bit(QEDE_ERR_ATTN_CLR_EN, &err_flags); set_bit(QEDE_ERR_GET_DBG_INFO, &err_flags); + /* make this error as recoverable and start recovery*/ + set_bit(QEDE_ERR_IS_RECOVERABLE, &err_flags); break; default: diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c index d8f0863b3934..fc364b4ab6eb 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c @@ -1021,7 +1021,7 @@ clear_diag_irq: static void qlcnic_create_loopback_buff(unsigned char *data, u8 mac[]) { - unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; + static const unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00}; memset(data, 0x4e, QLCNIC_ILB_PKT_SIZE); @@ -1527,7 +1527,9 @@ qlcnic_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) * firmware coalescing to default. 
*/ static int qlcnic_set_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); int err; @@ -1551,7 +1553,9 @@ static int qlcnic_set_intr_coalesce(struct net_device *netdev, } static int qlcnic_get_intr_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ethcoal) + struct ethtool_coalesce *ethcoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct qlcnic_adapter *adapter = netdev_priv(netdev); diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c index e6784023bce4..3d61a767a8a3 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c @@ -94,10 +94,8 @@ void qlcnic_release_rx_buffers(struct qlcnic_adapter *adapter) if (rx_buf->skb == NULL) continue; - pci_unmap_single(adapter->pdev, - rx_buf->dma, - rds_ring->dma_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&adapter->pdev->dev, rx_buf->dma, + rds_ring->dma_size, DMA_FROM_DEVICE); dev_kfree_skb_any(rx_buf->skb); } @@ -139,16 +137,16 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter, for (i = 0; i < tx_ring->num_desc; i++) { buffrag = cmd_buf->frag_array; if (buffrag->dma) { - pci_unmap_single(adapter->pdev, buffrag->dma, - buffrag->length, PCI_DMA_TODEVICE); + dma_unmap_single(&adapter->pdev->dev, buffrag->dma, + buffrag->length, DMA_TO_DEVICE); buffrag->dma = 0ULL; } for (j = 1; j < cmd_buf->frag_count; j++) { buffrag++; if (buffrag->dma) { - pci_unmap_page(adapter->pdev, buffrag->dma, - buffrag->length, - PCI_DMA_TODEVICE); + dma_unmap_page(&adapter->pdev->dev, + buffrag->dma, buffrag->length, + DMA_TO_DEVICE); buffrag->dma = 0ULL; } } diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index af4c516a9e7c..29cdcb2285b1 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -587,9 +587,9 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, nr_frags = skb_shinfo(skb)->nr_frags; nf = &pbuf->frag_array[0]; - map = pci_map_single(pdev, skb->data, skb_headlen(skb), - PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(pdev, map)) + map = dma_map_single(&pdev->dev, skb->data, skb_headlen(skb), + DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, map)) goto out_err; nf->dma = map; @@ -612,11 +612,11 @@ static int qlcnic_map_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, unwind: while (--i >= 0) { nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); } nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); out_err: return -ENOMEM; @@ -630,11 +630,11 @@ static void qlcnic_unmap_buffers(struct pci_dev *pdev, struct sk_buff *skb, for (i = 0; i < nr_frags; i++) { nf = &pbuf->frag_array[i+1]; - pci_unmap_page(pdev, nf->dma, nf->length, PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, nf->dma, nf->length, DMA_TO_DEVICE); } nf = &pbuf->frag_array[0]; - pci_unmap_single(pdev, nf->dma, skb_headlen(skb), PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, nf->dma, skb_headlen(skb), DMA_TO_DEVICE); pbuf->skb = NULL; } @@ -825,10 
+825,10 @@ static int qlcnic_alloc_rx_skb(struct qlcnic_adapter *adapter, } skb_reserve(skb, NET_IP_ALIGN); - dma = pci_map_single(pdev, skb->data, - rds_ring->dma_size, PCI_DMA_FROMDEVICE); + dma = dma_map_single(&pdev->dev, skb->data, rds_ring->dma_size, + DMA_FROM_DEVICE); - if (pci_dma_mapping_error(pdev, dma)) { + if (dma_mapping_error(&pdev->dev, dma)) { adapter->stats.rx_dma_map_error++; dev_kfree_skb_any(skb); return -ENOMEM; @@ -903,13 +903,13 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter, buffer = &tx_ring->cmd_buf_arr[sw_consumer]; if (buffer->skb) { frag = &buffer->frag_array[0]; - pci_unmap_single(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); + dma_unmap_single(&pdev->dev, frag->dma, frag->length, + DMA_TO_DEVICE); frag->dma = 0ULL; for (i = 1; i < buffer->frag_count; i++) { frag++; - pci_unmap_page(pdev, frag->dma, frag->length, - PCI_DMA_TODEVICE); + dma_unmap_page(&pdev->dev, frag->dma, + frag->length, DMA_TO_DEVICE); frag->dma = 0ULL; } tx_ring->tx_stats.xmit_finished++; @@ -1147,8 +1147,8 @@ static struct sk_buff *qlcnic_process_rxbuf(struct qlcnic_adapter *adapter, return NULL; } - pci_unmap_single(adapter->pdev, buffer->dma, ring->dma_size, - PCI_DMA_FROMDEVICE); + dma_unmap_single(&adapter->pdev->dev, buffer->dma, ring->dma_size, + DMA_FROM_DEVICE); skb = buffer->skb; if (likely((adapter->netdev->features & NETIF_F_RXCSUM) && diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index a4fa507903ee..75960a29f80e 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2343,11 +2343,9 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev, static int qlcnic_set_dma_mask(struct pci_dev *pdev, int *pci_using_dac) { - if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) + if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) *pci_using_dac = 1; - else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + else if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32))) *pci_using_dac = 0; else { dev_err(&pdev->dev, "Unable to set DMA mask, aborting\n"); diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c index ad655f0a4965..9015a38eaced 100644 --- a/drivers/net/ethernet/qualcomm/emac/emac.c +++ b/drivers/net/ethernet/qualcomm/emac/emac.c @@ -377,7 +377,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_start_xmit = emac_start_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = emac_change_mtu, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_tx_timeout = emac_tx_timeout, .ndo_get_stats64 = emac_get_stats64, .ndo_set_features = emac_set_features, diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index b64c254e00ba..8427fe1b8fd1 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -434,7 +434,7 @@ qcaspi_receive(struct qcaspi *qca) skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_checksum_none_assert(qca->rx_skb); netif_rx_ni(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(net_dev, net_dev->mtu + VLAN_ETH_HLEN); diff --git a/drivers/net/ethernet/qualcomm/qca_uart.c 
b/drivers/net/ethernet/qualcomm/qca_uart.c index bcdeca7b3366..ce3f7ce31adc 100644 --- a/drivers/net/ethernet/qualcomm/qca_uart.c +++ b/drivers/net/ethernet/qualcomm/qca_uart.c @@ -107,7 +107,7 @@ qca_tty_receive(struct serdev_device *serdev, const unsigned char *data, skb_put(qca->rx_skb, retcode); qca->rx_skb->protocol = eth_type_trans( qca->rx_skb, qca->rx_skb->dev); - qca->rx_skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_checksum_none_assert(qca->rx_skb); netif_rx_ni(qca->rx_skb); qca->rx_skb = netdev_alloc_skb_ip_align(netdev, netdev->mtu + diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c index 47e9998b62f0..4b2eca5e08e2 100644 --- a/drivers/net/ethernet/rdc/r6040.c +++ b/drivers/net/ethernet/rdc/r6040.c @@ -954,7 +954,7 @@ static const struct net_device_ops r6040_netdev_ops = { .ndo_set_rx_mode = r6040_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_tx_timeout = r6040_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = r6040_poll_controller, diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c index 9677e257e9a1..2b84b4565e64 100644 --- a/drivers/net/ethernet/realtek/8139cp.c +++ b/drivers/net/ethernet/realtek/8139cp.c @@ -514,7 +514,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) } new_mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen, - PCI_DMA_FROMDEVICE); + DMA_FROM_DEVICE); if (dma_mapping_error(&cp->pdev->dev, new_mapping)) { dev->stats.rx_dropped++; kfree_skb(new_skb); @@ -522,7 +522,7 @@ static int cp_rx_poll(struct napi_struct *napi, int budget) } dma_unmap_single(&cp->pdev->dev, mapping, - buflen, PCI_DMA_FROMDEVICE); + buflen, DMA_FROM_DEVICE); /* Handle checksum offloading for incoming packets. 
*/ if (cp_rx_csum_ok(status)) @@ -666,7 +666,7 @@ static void cp_tx (struct cp_private *cp) dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), cp->tx_opts[tx_tail] & 0xffff, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (status & LastFrag) { if (status & (TxError | TxFIFOUnder)) { @@ -724,7 +724,7 @@ static void unwind_tx_frag_mapping(struct cp_private *cp, struct sk_buff *skb, txd = &cp->tx_ring[index]; this_frag = &skb_shinfo(skb)->frags[frag]; dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr), - skb_frag_size(this_frag), PCI_DMA_TODEVICE); + skb_frag_size(this_frag), DMA_TO_DEVICE); } } @@ -781,7 +781,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, dma_addr_t mapping; len = skb->len; - mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE); + mapping = dma_map_single(&cp->pdev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&cp->pdev->dev, mapping)) goto out_dma_error; @@ -810,7 +810,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, first_eor = eor; first_len = skb_headlen(skb); first_mapping = dma_map_single(&cp->pdev->dev, skb->data, - first_len, PCI_DMA_TODEVICE); + first_len, DMA_TO_DEVICE); if (dma_mapping_error(&cp->pdev->dev, first_mapping)) goto out_dma_error; @@ -826,7 +826,7 @@ static netdev_tx_t cp_start_xmit (struct sk_buff *skb, len = skb_frag_size(this_frag); mapping = dma_map_single(&cp->pdev->dev, skb_frag_address(this_frag), - len, PCI_DMA_TODEVICE); + len, DMA_TO_DEVICE); if (dma_mapping_error(&cp->pdev->dev, mapping)) { unwind_tx_frag_mapping(cp, skb, first_entry, entry); goto out_dma_error; @@ -1069,7 +1069,7 @@ static int cp_refill_rx(struct cp_private *cp) goto err_out; mapping = dma_map_single(&cp->pdev->dev, skb->data, - cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + cp->rx_buf_sz, DMA_FROM_DEVICE); if (dma_mapping_error(&cp->pdev->dev, mapping)) { kfree_skb(skb); goto err_out; @@ -1139,7 +1139,7 @@ static void cp_clean_rings (struct cp_private *cp) if (cp->rx_skb[i]) { desc = cp->rx_ring + i; dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), - cp->rx_buf_sz, PCI_DMA_FROMDEVICE); + cp->rx_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb_any(cp->rx_skb[i]); } } @@ -1151,7 +1151,7 @@ static void cp_clean_rings (struct cp_private *cp) desc = cp->tx_ring + i; dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr), le32_to_cpu(desc->opts1) & 0xffff, - PCI_DMA_TODEVICE); + DMA_TO_DEVICE); if (le32_to_cpu(desc->opts1) & LastFrag) dev_kfree_skb_any(skb); cp->dev->stats.tx_dropped++; @@ -1869,7 +1869,7 @@ static const struct net_device_ops cp_netdev_ops = { .ndo_set_mac_address = cp_set_mac_address, .ndo_set_rx_mode = cp_set_rx_mode, .ndo_get_stats = cp_get_stats, - .ndo_do_ioctl = cp_ioctl, + .ndo_eth_ioctl = cp_ioctl, .ndo_start_xmit = cp_start_xmit, .ndo_tx_timeout = cp_tx_timeout, .ndo_set_features = cp_set_features, @@ -1945,24 +1945,17 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent) /* Configure DMA attributes. 
*/ if ((sizeof(dma_addr_t) > 4) && - !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) && - !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + !dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { pci_using_dac = 0; - rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_res; } - rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); - if (rc) { - dev_err(&pdev->dev, - "No usable consistent DMA configuration, aborting\n"); - goto err_out_res; - } } cp->cpcmd = (pci_using_dac ? PCIDAC : 0) | diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c index f0608f050050..2e6923cc653e 100644 --- a/drivers/net/ethernet/realtek/8139too.c +++ b/drivers/net/ethernet/realtek/8139too.c @@ -932,7 +932,7 @@ static const struct net_device_ops rtl8139_netdev_ops = { .ndo_set_mac_address = rtl8139_set_mac_address, .ndo_start_xmit = rtl8139_start_xmit, .ndo_set_rx_mode = rtl8139_set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = rtl8139_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8139_poll_controller, diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index 4d8e337f5085..46a6ff9a782d 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -1749,7 +1749,10 @@ rtl_coalesce_info(struct rtl8169_private *tp) return ERR_PTR(-ELNRNG); } -static int rtl_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int rtl_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct rtl8169_private *tp = netdev_priv(dev); const struct rtl_coalesce_info *ci; @@ -1807,7 +1810,10 @@ static int rtl_coalesce_choose_scale(struct rtl8169_private *tp, u32 usec, return -ERANGE; } -static int rtl_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) +static int rtl_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct rtl8169_private *tp = netdev_priv(dev); u32 tx_fr = ec->tx_max_coalesced_frames; @@ -2598,7 +2604,7 @@ static u32 rtl_csi_read(struct rtl8169_private *tp, int addr) RTL_R32(tp, CSIDR) : ~0; } -static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) +static void rtl_set_aspm_entry_latency(struct rtl8169_private *tp, u8 val) { struct pci_dev *pdev = tp->pci_dev; u32 csi; @@ -2606,6 +2612,8 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) /* According to Realtek the value at config space address 0x070f * controls the L0s/L1 entrance latency. We try standard ECAM access * first and if it fails fall back to CSI. + * bit 0..2: L0: 0 = 1us, 1 = 2us .. 6 = 7us, 7 = 7us (no typo) + * bit 3..5: L1: 0 = 1us, 1 = 2us .. 
6 = 64us, 7 = 64us */ if (pdev->cfg_size > 0x070f && pci_write_config_byte(pdev, 0x070f, val) == PCIBIOS_SUCCESSFUL) @@ -2619,7 +2627,8 @@ static void rtl_csi_access_enable(struct rtl8169_private *tp, u8 val) static void rtl_set_def_aspm_entry_latency(struct rtl8169_private *tp) { - rtl_csi_access_enable(tp, 0x27); + /* L0 7us, L1 16us */ + rtl_set_aspm_entry_latency(tp, 0x27); } struct ephy_info { @@ -2660,6 +2669,34 @@ static void rtl_pcie_state_l2l3_disable(struct rtl8169_private *tp) RTL_W8(tp, Config3, RTL_R8(tp, Config3) & ~Rdy_to_L23); } +static void rtl_enable_exit_l1(struct rtl8169_private *tp) +{ + /* Bits control which events trigger ASPM L1 exit: + * Bit 12: rxdv + * Bit 11: ltr_msg + * Bit 10: txdma_poll + * Bit 9: xadm + * Bit 8: pktavi + * Bit 7: txpla + */ + switch (tp->mac_version) { + case RTL_GIGA_MAC_VER_34 ... RTL_GIGA_MAC_VER_36: + rtl_eri_set_bits(tp, 0xd4, 0x1f00); + break; + case RTL_GIGA_MAC_VER_37 ... RTL_GIGA_MAC_VER_38: + rtl_eri_set_bits(tp, 0xd4, 0x0c00); + break; + case RTL_GIGA_MAC_VER_40 ... RTL_GIGA_MAC_VER_53: + rtl_eri_set_bits(tp, 0xd4, 0x1f80); + break; + case RTL_GIGA_MAC_VER_60 ... RTL_GIGA_MAC_VER_63: + r8168_mac_ocp_modify(tp, 0xc0ac, 0, 0x1f80); + break; + default: + break; + } +} + static void rtl_hw_aspm_clkreq_enable(struct rtl8169_private *tp, bool enable) { /* Don't enable ASPM in the chip if OS can't control ASPM */ @@ -2848,7 +2885,6 @@ static void rtl_hw_start_8168e_2(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_1111, 0x0000); rtl_set_fifo_size(tp, 0x10, 0x10, 0x02, 0x06); - rtl_eri_set_bits(tp, 0x0d4, 0x1f00); rtl_eri_set_bits(tp, 0x1d0, BIT(1)); rtl_reset_packet_filter(tp); rtl_eri_set_bits(tp, 0x1b0, BIT(4)); @@ -2905,8 +2941,6 @@ static void rtl_hw_start_8168f_1(struct rtl8169_private *tp) rtl_hw_start_8168f(tp); rtl_ephy_init(tp, e_info_8168f_1); - - rtl_eri_set_bits(tp, 0x0d4, 0x1f00); } static void rtl_hw_start_8411(struct rtl8169_private *tp) @@ -2923,8 +2957,6 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp) rtl_pcie_state_l2l3_disable(tp); rtl_ephy_init(tp, e_info_8168f_1); - - rtl_eri_set_bits(tp, 0x0d4, 0x0c00); } static void rtl_hw_start_8168g(struct rtl8169_private *tp) @@ -2941,7 +2973,6 @@ static void rtl_hw_start_8168g(struct rtl8169_private *tp) rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000); rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000); - rtl_eri_set_bits(tp, 0x0d4, 0x1f80); rtl8168_config_eee_mac(tp); @@ -3172,7 +3203,6 @@ static void rtl_hw_start_8168h_1(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f00); rtl_eri_set_bits(tp, 0xdc, 0x001c); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3226,8 +3256,6 @@ static void rtl_hw_start_8168ep(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f80); - rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); RTL_W32(tp, MISC, RTL_R32(tp, MISC) & ~RXDV_GATED_EN); @@ -3329,7 +3357,7 @@ static void rtl_hw_start_8117(struct rtl8169_private *tp) rtl_reset_packet_filter(tp); - rtl_eri_set_bits(tp, 0xd4, 0x1f90); + rtl_eri_set_bits(tp, 0xd4, 0x0010); rtl_eri_write(tp, 0x5f0, ERIAR_MASK_0011, 0x4f87); @@ -3502,8 +3530,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) RTL_W8(tp, MCU, RTL_R8(tp, MCU) | EN_NDP | EN_OOB_RESET); RTL_W8(tp, DLLPR, RTL_R8(tp, DLLPR) & ~PFM_EN); - /* The default value is 0x13. 
Change it to 0x2f */ - rtl_csi_access_enable(tp, 0x2f); + /* L0 7us, L1 32us - needed to avoid issues with link-up detection */ + rtl_set_aspm_entry_latency(tp, 0x2f); rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000); @@ -3560,7 +3588,6 @@ static void rtl_hw_start_8125_common(struct rtl8169_private *tp) r8168_mac_ocp_modify(tp, 0xea1c, 0x0003, 0x0001); r8168_mac_ocp_modify(tp, 0xe0c0, 0x4f0f, 0x4403); r8168_mac_ocp_modify(tp, 0xe052, 0x0080, 0x0068); - r8168_mac_ocp_modify(tp, 0xc0ac, 0x0080, 0x1f00); r8168_mac_ocp_modify(tp, 0xd430, 0x0fff, 0x047f); r8168_mac_ocp_modify(tp, 0xea1c, 0x0004, 0x0000); @@ -3783,6 +3810,7 @@ static void rtl_hw_start(struct rtl8169_private *tp) else rtl_hw_start_8168(tp); + rtl_enable_exit_l1(tp); rtl_set_rx_max_size(tp); rtl_set_rx_tx_desc_registers(tp); rtl_lock_config_regs(tp); @@ -4983,7 +5011,7 @@ static const struct net_device_ops rtl_netdev_ops = { .ndo_fix_features = rtl8169_fix_features, .ndo_set_features = rtl8169_set_features, .ndo_set_mac_address = rtl_set_mac_address, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_set_rx_mode = rtl_set_rx_mode, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = rtl8169_netpoll, @@ -5278,11 +5306,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) return rc; - /* Disable ASPM completely as that cause random device stop working + /* Disable ASPM L1 as that cause random device stop working * problems as well as full system hangs for some PCIe devices users. */ - rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | - PCIE_LINK_STATE_L1); + rc = pci_disable_link_state(pdev, PCIE_LINK_STATE_L1); tp->aspm_manageable = !rc; /* enable device (incl. PCI PM wakeup and hotplug setup) */ diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index 5a2a4af31812..8008b2f45934 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig @@ -32,11 +32,11 @@ config SH_ETH config RAVB tristate "Renesas Ethernet AVB support" depends on ARCH_RENESAS || COMPILE_TEST + depends on PTP_1588_CLOCK_OPTIONAL select CRC32 select MII select MDIO_BITBANG select PHYLIB - imply PTP_1588_CLOCK help Renesas Ethernet AVB device driver. 
This driver supports the following SoCs: diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 80e62ca2e3d3..47c5377e4f42 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -956,10 +956,6 @@ enum RAVB_QUEUE { #define RX_BUF_SZ (2048 - ETH_FCS_LEN + sizeof(__sum16)) -/* TX descriptors per packet */ -#define NUM_TX_DESC_GEN2 2 -#define NUM_TX_DESC_GEN3 1 - struct ravb_tstamp_skb { struct list_head list; struct sk_buff *skb; @@ -983,9 +979,29 @@ struct ravb_ptp { struct ravb_ptp_perout perout[N_PER_OUT]; }; -enum ravb_chip_id { - RCAR_GEN2, - RCAR_GEN3, +struct ravb_hw_info { + void (*rx_ring_free)(struct net_device *ndev, int q); + void (*rx_ring_format)(struct net_device *ndev, int q); + void *(*alloc_rx_desc)(struct net_device *ndev, int q); + bool (*receive)(struct net_device *ndev, int *quota, int q); + void (*set_rate)(struct net_device *ndev); + int (*set_rx_csum_feature)(struct net_device *ndev, netdev_features_t features); + void (*dmac_init)(struct net_device *ndev); + void (*emac_init)(struct net_device *ndev); + const char (*gstrings_stats)[ETH_GSTRING_LEN]; + size_t gstrings_size; + netdev_features_t net_hw_features; + netdev_features_t net_features; + int stats_len; + size_t max_rx_len; + unsigned aligned_tx: 1; + + /* hardware features */ + unsigned internal_delay:1; /* AVB-DMAC has internal delays */ + unsigned tx_counters:1; /* E-MAC has TX counters */ + unsigned multi_irqs:1; /* AVB-DMAC and E-MAC has multiple irqs */ + unsigned no_ptp_cfg_active:1; /* AVB-DMAC does not support gPTP active in config mode */ + unsigned ptp_cfg_active:1; /* AVB-DMAC has gPTP support active in config mode */ }; struct ravb_private { @@ -1029,7 +1045,6 @@ struct ravb_private { int msg_enable; int speed; int emac_irq; - enum ravb_chip_id chip_id; int rx_irqs[NUM_RX_QUEUE]; int tx_irqs[NUM_TX_QUEUE]; @@ -1039,7 +1054,10 @@ struct ravb_private { unsigned rxcidm:1; /* RX Clock Internal Delay Mode */ unsigned txcidm:1; /* TX Clock Internal Delay Mode */ unsigned rgmii_override:1; /* Deprecated rgmii-*id behavior */ - int num_tx_desc; /* TX descriptors per packet */ + unsigned int num_tx_desc; /* TX descriptors per packet */ + + const struct ravb_hw_info *info; + struct reset_control *rstc; }; static inline u32 ravb_read(struct net_device *ndev, enum ravb_reg reg) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 805397088850..0f85f2d97b18 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -29,6 +29,7 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/sys_soc.h> +#include <linux/reset.h> #include <asm/div64.h> @@ -177,10 +178,10 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) { struct ravb_private *priv = netdev_priv(ndev); struct net_device_stats *stats = &priv->stats[q]; - int num_tx_desc = priv->num_tx_desc; + unsigned int num_tx_desc = priv->num_tx_desc; struct ravb_tx_desc *desc; + unsigned int entry; int free_num = 0; - int entry; u32 size; for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { @@ -216,31 +217,42 @@ static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only) return free_num; } -/* Free skb's and DMA buffers for Ethernet AVB */ -static void ravb_ring_free(struct net_device *ndev, int q) +static void ravb_rx_ring_free(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - int 
num_tx_desc = priv->num_tx_desc; - int ring_size; - int i; + unsigned int ring_size; + unsigned int i; + + if (!priv->rx_ring[q]) + return; - if (priv->rx_ring[q]) { - for (i = 0; i < priv->num_rx_ring[q]; i++) { - struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; + for (i = 0; i < priv->num_rx_ring[q]; i++) { + struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; - if (!dma_mapping_error(ndev->dev.parent, - le32_to_cpu(desc->dptr))) - dma_unmap_single(ndev->dev.parent, - le32_to_cpu(desc->dptr), - RX_BUF_SZ, - DMA_FROM_DEVICE); - } - ring_size = sizeof(struct ravb_ex_rx_desc) * - (priv->num_rx_ring[q] + 1); - dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], - priv->rx_desc_dma[q]); - priv->rx_ring[q] = NULL; + if (!dma_mapping_error(ndev->dev.parent, + le32_to_cpu(desc->dptr))) + dma_unmap_single(ndev->dev.parent, + le32_to_cpu(desc->dptr), + RX_BUF_SZ, + DMA_FROM_DEVICE); } + ring_size = sizeof(struct ravb_ex_rx_desc) * + (priv->num_rx_ring[q] + 1); + dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], + priv->rx_desc_dma[q]); + priv->rx_ring[q] = NULL; +} + +/* Free skb's and DMA buffers for Ethernet AVB */ +static void ravb_ring_free(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + unsigned int ring_size; + unsigned int i; + + info->rx_ring_free(ndev, q); if (priv->tx_ring[q]) { ravb_tx_free(ndev, q, false); @@ -271,24 +283,13 @@ static void ravb_ring_free(struct net_device *ndev, int q) priv->tx_skb[q] = NULL; } -/* Format skb and descriptor buffer for Ethernet AVB */ -static void ravb_ring_format(struct net_device *ndev, int q) +static void ravb_rx_ring_format(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - int num_tx_desc = priv->num_tx_desc; struct ravb_ex_rx_desc *rx_desc; - struct ravb_tx_desc *tx_desc; - struct ravb_desc *desc; - int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; - int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * - num_tx_desc; + unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; dma_addr_t dma_addr; - int i; - - priv->cur_rx[q] = 0; - priv->cur_tx[q] = 0; - priv->dirty_rx[q] = 0; - priv->dirty_tx[q] = 0; + unsigned int i; memset(priv->rx_ring[q], 0, rx_ring_size); /* Build RX ring buffer */ @@ -310,6 +311,26 @@ static void ravb_ring_format(struct net_device *ndev, int q) rx_desc = &priv->rx_ring[q][i]; rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); rx_desc->die_dt = DT_LINKFIX; /* type */ +} + +/* Format skb and descriptor buffer for Ethernet AVB */ +static void ravb_ring_format(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + struct ravb_tx_desc *tx_desc; + struct ravb_desc *desc; + unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * + num_tx_desc; + unsigned int i; + + priv->cur_rx[q] = 0; + priv->cur_tx[q] = 0; + priv->dirty_rx[q] = 0; + priv->dirty_tx[q] = 0; + + info->rx_ring_format(ndev, q); memset(priv->tx_ring[q], 0, tx_ring_size); /* Build TX ring buffer */ @@ -335,14 +356,28 @@ static void ravb_ring_format(struct net_device *ndev, int q) desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); } +static void *ravb_alloc_rx_desc(struct net_device *ndev, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + unsigned int ring_size; + + ring_size = 
sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); + + priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, + &priv->rx_desc_dma[q], + GFP_KERNEL); + return priv->rx_ring[q]; +} + /* Init skb and descriptor buffer for Ethernet AVB */ static int ravb_ring_init(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); - int num_tx_desc = priv->num_tx_desc; + const struct ravb_hw_info *info = priv->info; + unsigned int num_tx_desc = priv->num_tx_desc; + unsigned int ring_size; struct sk_buff *skb; - int ring_size; - int i; + unsigned int i; /* Allocate RX and TX skb rings */ priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], @@ -353,7 +388,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) goto error; for (i = 0; i < priv->num_rx_ring[q]; i++) { - skb = netdev_alloc_skb(ndev, RX_BUF_SZ + RAVB_ALIGN - 1); + skb = netdev_alloc_skb(ndev, info->max_rx_len); if (!skb) goto error; ravb_set_buffer_align(skb); @@ -369,11 +404,7 @@ static int ravb_ring_init(struct net_device *ndev, int q) } /* Allocate all RX descriptors. */ - ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); - priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, - &priv->rx_desc_dma[q], - GFP_KERNEL); - if (!priv->rx_ring[q]) + if (!info->alloc_rx_desc(ndev, q)) goto error; priv->dirty_rx[q] = 0; @@ -395,8 +426,7 @@ error: return -ENOMEM; } -/* E-MAC init function */ -static void ravb_emac_init(struct net_device *ndev) +static void ravb_rcar_emac_init(struct net_device *ndev) { /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); @@ -422,29 +452,19 @@ static void ravb_emac_init(struct net_device *ndev) ravb_write(ndev, ECSIPR_ICDIP | ECSIPR_MPDIP | ECSIPR_LCHNGIP, ECSIPR); } -/* Device init function for Ethernet AVB */ -static int ravb_dmac_init(struct net_device *ndev) +/* E-MAC init function */ +static void ravb_emac_init(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - int error; + const struct ravb_hw_info *info = priv->info; - /* Set CONFIG mode */ - error = ravb_config(ndev); - if (error) - return error; - - error = ravb_ring_init(ndev, RAVB_BE); - if (error) - return error; - error = ravb_ring_init(ndev, RAVB_NC); - if (error) { - ravb_ring_free(ndev, RAVB_BE); - return error; - } + info->emac_init(ndev); +} - /* Descriptor format */ - ravb_ring_format(ndev, RAVB_BE); - ravb_ring_format(ndev, RAVB_NC); +static void ravb_rcar_dmac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; /* Set AVB RX */ ravb_write(ndev, @@ -457,7 +477,7 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, TCCR_TFEN, TCCR); /* Interrupt init: */ - if (priv->chip_id == RCAR_GEN3) { + if (info->multi_irqs) { /* Clear DIL.DPLx */ ravb_write(ndev, 0, DIL); /* Set queue specific interrupt */ @@ -471,6 +491,34 @@ static int ravb_dmac_init(struct net_device *ndev) ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2); /* Frame transmitted, timestamp FIFO updated */ ravb_write(ndev, TIC_FTE0 | TIC_FTE1 | TIC_TFUE, TIC); +} + +/* Device init function for Ethernet AVB */ +static int ravb_dmac_init(struct net_device *ndev) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + int error; + + /* Set CONFIG mode */ + error = ravb_config(ndev); + if (error) + return error; + + error = ravb_ring_init(ndev, RAVB_BE); + if (error) + return error; + 
error = ravb_ring_init(ndev, RAVB_NC); + if (error) { + ravb_ring_free(ndev, RAVB_BE); + return error; + } + + /* Descriptor format */ + ravb_ring_format(ndev, RAVB_BE); + ravb_ring_format(ndev, RAVB_NC); + + info->dmac_init(ndev); /* Setting the control will start the AVB-DMAC process. */ ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_OPERATION); @@ -531,10 +579,10 @@ static void ravb_rx_csum(struct sk_buff *skb) skb_trim(skb, skb->len - sizeof(__sum16)); } -/* Packet receive function for Ethernet AVB */ -static bool ravb_rx(struct net_device *ndev, int *quota, int q) +static bool ravb_rcar_rx(struct net_device *ndev, int *quota, int q) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - priv->cur_rx[q]; @@ -619,9 +667,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) desc->ds_cc = cpu_to_le16(RX_BUF_SZ); if (!priv->rx_skb[q][entry]) { - skb = netdev_alloc_skb(ndev, - RX_BUF_SZ + - RAVB_ALIGN - 1); + skb = netdev_alloc_skb(ndev, info->max_rx_len); if (!skb) break; /* Better luck next round. */ ravb_set_buffer_align(skb); @@ -647,6 +693,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q) return boguscnt <= 0; } +/* Packet receive function for Ethernet AVB */ +static bool ravb_rx(struct net_device *ndev, int *quota, int q) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + return info->receive(ndev, quota, q); +} + static void ravb_rcv_snd_disable(struct net_device *ndev) { /* Disable TX and RX */ @@ -758,6 +813,7 @@ static void ravb_error_interrupt(struct net_device *ndev) static bool ravb_queue_interrupt(struct net_device *ndev, int q) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; u32 ris0 = ravb_read(ndev, RIS0); u32 ric0 = ravb_read(ndev, RIC0); u32 tis = ravb_read(ndev, TIS); @@ -766,7 +822,7 @@ static bool ravb_queue_interrupt(struct net_device *ndev, int q) if (((ris0 & ric0) & BIT(q)) || ((tis & tic) & BIT(q))) { if (napi_schedule_prep(&priv->napi[q])) { /* Mask RX and TX interrupts */ - if (priv->chip_id == RCAR_GEN2) { + if (!info->multi_irqs) { ravb_write(ndev, ric0 & ~BIT(q), RIC0); ravb_write(ndev, tic & ~BIT(q), TIC); } else { @@ -909,6 +965,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; unsigned long flags; int q = napi - priv->napi; int mask = BIT(q); @@ -932,7 +989,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) /* Re-enable RX/TX interrupts */ spin_lock_irqsave(&priv->lock, flags); - if (priv->chip_id == RCAR_GEN2) { + if (!info->multi_irqs) { ravb_modify(ndev, RIC0, mask, mask); ravb_modify(ndev, TIC, mask, mask); } else { @@ -956,6 +1013,7 @@ out: static void ravb_adjust_link(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct phy_device *phydev = ndev->phydev; bool new_state = false; unsigned long flags; @@ -970,7 +1028,7 @@ static void ravb_adjust_link(struct net_device *ndev) if (phydev->speed != priv->speed) { new_state = true; priv->speed = phydev->speed; - ravb_set_rate(ndev); + info->set_rate(ndev); } if (!priv->link) { ravb_modify(ndev, ECMR, ECMR_TXF, 0); @@ -1133,13 +1191,14 @@ static const char ravb_gstrings_stats[][ETH_GSTRING_LEN] = 
{ "rx_queue_1_over_errors", }; -#define RAVB_STATS_LEN ARRAY_SIZE(ravb_gstrings_stats) - static int ravb_get_sset_count(struct net_device *netdev, int sset) { + struct ravb_private *priv = netdev_priv(netdev); + const struct ravb_hw_info *info = priv->info; + switch (sset) { case ETH_SS_STATS: - return RAVB_STATS_LEN; + return info->stats_len; default: return -EOPNOTSUPP; } @@ -1176,9 +1235,12 @@ static void ravb_get_ethtool_stats(struct net_device *ndev, static void ravb_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + switch (stringset) { case ETH_SS_STATS: - memcpy(data, ravb_gstrings_stats, sizeof(ravb_gstrings_stats)); + memcpy(data, info->gstrings_stats, info->gstrings_size); break; } } @@ -1198,6 +1260,7 @@ static int ravb_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int error; if (ring->tx_pending > BE_TX_RING_MAX || @@ -1211,7 +1274,7 @@ static int ravb_set_ringparam(struct net_device *ndev, if (netif_running(ndev)) { netif_device_detach(ndev); /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ error = ravb_stop_dma(ndev); @@ -1243,7 +1306,7 @@ static int ravb_set_ringparam(struct net_device *ndev, ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_init(ndev, priv->pdev); netif_device_attach(ndev); @@ -1334,6 +1397,7 @@ static inline int ravb_hook_irq(unsigned int irq, irq_handler_t handler, static int ravb_open(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct platform_device *pdev = priv->pdev; struct device *dev = &pdev->dev; int error; @@ -1341,7 +1405,7 @@ static int ravb_open(struct net_device *ndev) napi_enable(&priv->napi[RAVB_BE]); napi_enable(&priv->napi[RAVB_NC]); - if (priv->chip_id == RCAR_GEN2) { + if (!info->multi_irqs) { error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, ndev->name, ndev); if (error) { @@ -1382,7 +1446,7 @@ static int ravb_open(struct net_device *ndev) ravb_emac_init(ndev); /* Initialise PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ -1396,10 +1460,10 @@ static int ravb_open(struct net_device *ndev) out_ptp_stop: /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); out_free_irq_nc_tx: - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) goto out_free_irq; free_irq(priv->tx_irqs[RAVB_NC], ndev); out_free_irq_nc_rx: @@ -1437,13 +1501,14 @@ static void ravb_tx_timeout_work(struct work_struct *work) { struct ravb_private *priv = container_of(work, struct ravb_private, work); + const struct ravb_hw_info *info = priv->info; struct net_device *ndev = priv->ndev; int error; netif_tx_stop_all_queues(ndev); /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); /* Wait for DMA stopping */ @@ -1478,7 +1543,7 @@ static void ravb_tx_timeout_work(struct work_struct *work) out: /* Initialise PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_init(ndev, priv->pdev); netif_tx_start_all_queues(ndev); @@ 
-1488,7 +1553,7 @@ out: static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); - int num_tx_desc = priv->num_tx_desc; + unsigned int num_tx_desc = priv->num_tx_desc; u16 q = skb_get_queue_mapping(skb); struct ravb_tstamp_skb *ts_skb; struct ravb_tx_desc *desc; @@ -1628,13 +1693,14 @@ static u16 ravb_select_queue(struct net_device *ndev, struct sk_buff *skb, static struct net_device_stats *ravb_get_stats(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct net_device_stats *nstats, *stats0, *stats1; nstats = &ndev->stats; stats0 = &priv->stats[RAVB_BE]; stats1 = &priv->stats[RAVB_NC]; - if (priv->chip_id == RCAR_GEN3) { + if (info->tx_counters) { nstats->tx_dropped += ravb_read(ndev, TROCR); ravb_write(ndev, 0, TROCR); /* (write clear) */ } @@ -1675,6 +1741,7 @@ static int ravb_close(struct net_device *ndev) { struct device_node *np = ndev->dev.parent->of_node; struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; struct ravb_tstamp_skb *ts_skb, *ts_skb2; netif_tx_stop_all_queues(ndev); @@ -1685,7 +1752,7 @@ static int ravb_close(struct net_device *ndev) ravb_write(ndev, 0, TIC); /* Stop PTP Clock driver */ - if (priv->chip_id == RCAR_GEN2) + if (info->no_ptp_cfg_active) ravb_ptp_stop(ndev); /* Set the config mode to stop the AVB-DMAC's processes */ @@ -1708,7 +1775,7 @@ static int ravb_close(struct net_device *ndev) of_phy_deregister_fixed_link(np); } - if (priv->chip_id != RCAR_GEN2) { + if (info->multi_irqs) { free_irq(priv->tx_irqs[RAVB_NC], ndev); free_irq(priv->rx_irqs[RAVB_NC], ndev); free_irq(priv->tx_irqs[RAVB_BE], ndev); @@ -1851,8 +1918,8 @@ static void ravb_set_rx_csum(struct net_device *ndev, bool enable) spin_unlock_irqrestore(&priv->lock, flags); } -static int ravb_set_features(struct net_device *ndev, - netdev_features_t features) +static int ravb_set_features_rx_csum(struct net_device *ndev, + netdev_features_t features) { netdev_features_t changed = ndev->features ^ features; @@ -1864,6 +1931,15 @@ static int ravb_set_features(struct net_device *ndev, return 0; } +static int ravb_set_features(struct net_device *ndev, + netdev_features_t features) +{ + struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; + + return info->set_rx_csum_feature(ndev, features); +} + static const struct net_device_ops ravb_netdev_ops = { .ndo_open = ravb_open, .ndo_stop = ravb_close, @@ -1872,7 +1948,7 @@ static const struct net_device_ops ravb_netdev_ops = { .ndo_get_stats = ravb_get_stats, .ndo_set_rx_mode = ravb_set_rx_mode, .ndo_tx_timeout = ravb_tx_timeout, - .ndo_do_ioctl = ravb_do_ioctl, + .ndo_eth_ioctl = ravb_do_ioctl, .ndo_change_mtu = ravb_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, @@ -1924,12 +2000,52 @@ static int ravb_mdio_release(struct ravb_private *priv) return 0; } +static const struct ravb_hw_info ravb_gen3_hw_info = { + .rx_ring_free = ravb_rx_ring_free, + .rx_ring_format = ravb_rx_ring_format, + .alloc_rx_desc = ravb_alloc_rx_desc, + .receive = ravb_rcar_rx, + .set_rate = ravb_set_rate, + .set_rx_csum_feature = ravb_set_features_rx_csum, + .dmac_init = ravb_rcar_dmac_init, + .emac_init = ravb_rcar_emac_init, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = 
ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .internal_delay = 1, + .tx_counters = 1, + .multi_irqs = 1, + .ptp_cfg_active = 1, +}; + +static const struct ravb_hw_info ravb_gen2_hw_info = { + .rx_ring_free = ravb_rx_ring_free, + .rx_ring_format = ravb_rx_ring_format, + .alloc_rx_desc = ravb_alloc_rx_desc, + .receive = ravb_rcar_rx, + .set_rate = ravb_set_rate, + .set_rx_csum_feature = ravb_set_features_rx_csum, + .dmac_init = ravb_rcar_dmac_init, + .emac_init = ravb_rcar_emac_init, + .gstrings_stats = ravb_gstrings_stats, + .gstrings_size = sizeof(ravb_gstrings_stats), + .net_hw_features = NETIF_F_RXCSUM, + .net_features = NETIF_F_RXCSUM, + .stats_len = ARRAY_SIZE(ravb_gstrings_stats), + .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1, + .aligned_tx = 1, + .no_ptp_cfg_active = 1, +}; + static const struct of_device_id ravb_match_table[] = { - { .compatible = "renesas,etheravb-r8a7790", .data = (void *)RCAR_GEN2 }, - { .compatible = "renesas,etheravb-r8a7794", .data = (void *)RCAR_GEN2 }, - { .compatible = "renesas,etheravb-rcar-gen2", .data = (void *)RCAR_GEN2 }, - { .compatible = "renesas,etheravb-r8a7795", .data = (void *)RCAR_GEN3 }, - { .compatible = "renesas,etheravb-rcar-gen3", .data = (void *)RCAR_GEN3 }, + { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info }, + { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info }, + { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info }, { } }; MODULE_DEVICE_TABLE(of, ravb_match_table); @@ -1962,8 +2078,9 @@ static int ravb_set_gti(struct net_device *ndev) static void ravb_set_config_mode(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; - if (priv->chip_id == RCAR_GEN2) { + if (info->no_ptp_cfg_active) { ravb_modify(ndev, CCC, CCC_OPC, CCC_OPC_CONFIG); /* Set CSEL value */ ravb_modify(ndev, CCC, CCC_CSEL, CCC_CSEL_HPB); @@ -1973,13 +2090,6 @@ static void ravb_set_config_mode(struct net_device *ndev) } } -static const struct soc_device_attribute ravb_delay_mode_quirk_match[] = { - { .soc_id = "r8a774c0" }, - { .soc_id = "r8a77990" }, - { .soc_id = "r8a77995" }, - { /* sentinel */ } -}; - /* Set tx and rx clock internal delay modes */ static void ravb_parse_delay_mode(struct device_node *np, struct net_device *ndev) { @@ -2010,12 +2120,8 @@ static void ravb_parse_delay_mode(struct device_node *np, struct net_device *nde if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { - if (!WARN(soc_device_match(ravb_delay_mode_quirk_match), - "phy-mode %s requires TX clock internal delay mode which is not supported by this hardware revision. 
Please update device tree", - phy_modes(priv->phy_interface))) { - priv->txcidm = 1; - priv->rgmii_override = 1; - } + priv->txcidm = 1; + priv->rgmii_override = 1; } } @@ -2034,8 +2140,9 @@ static void ravb_set_delay_mode(struct net_device *ndev) static int ravb_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; + const struct ravb_hw_info *info; + struct reset_control *rstc; struct ravb_private *priv; - enum ravb_chip_id chip_id; struct net_device *ndev; int error, irq, q; struct resource *res; @@ -2047,20 +2154,26 @@ static int ravb_probe(struct platform_device *pdev) return -EINVAL; } + rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); + if (IS_ERR(rstc)) + return dev_err_probe(&pdev->dev, PTR_ERR(rstc), + "failed to get cpg reset\n"); + ndev = alloc_etherdev_mqs(sizeof(struct ravb_private), NUM_TX_QUEUE, NUM_RX_QUEUE); if (!ndev) return -ENOMEM; - ndev->features = NETIF_F_RXCSUM; - ndev->hw_features = NETIF_F_RXCSUM; + info = of_device_get_match_data(&pdev->dev); + ndev->features = info->net_features; + ndev->hw_features = info->net_hw_features; + + reset_control_deassert(rstc); pm_runtime_enable(&pdev->dev); pm_runtime_get_sync(&pdev->dev); - chip_id = (enum ravb_chip_id)of_device_get_match_data(&pdev->dev); - - if (chip_id == RCAR_GEN3) + if (info->multi_irqs) irq = platform_get_irq_byname(pdev, "ch22"); else irq = platform_get_irq(pdev, 0); @@ -2073,6 +2186,8 @@ static int ravb_probe(struct platform_device *pdev) SET_NETDEV_DEV(ndev, &pdev->dev); priv = netdev_priv(ndev); + priv->info = info; + priv->rstc = rstc; priv->ndev = ndev; priv->pdev = pdev; priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; @@ -2099,7 +2214,7 @@ static int ravb_probe(struct platform_device *pdev) priv->avb_link_active_low = of_property_read_bool(np, "renesas,ether-link-active-low"); - if (chip_id == RCAR_GEN3) { + if (info->multi_irqs) { irq = platform_get_irq_byname(pdev, "ch24"); if (irq < 0) { error = irq; @@ -2124,8 +2239,6 @@ static int ravb_probe(struct platform_device *pdev) } } - priv->chip_id = chip_id; - priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { error = PTR_ERR(priv->clk); @@ -2142,8 +2255,12 @@ static int ravb_probe(struct platform_device *pdev) ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); ndev->min_mtu = ETH_MIN_MTU; - priv->num_tx_desc = chip_id == RCAR_GEN2 ? - NUM_TX_DESC_GEN2 : NUM_TX_DESC_GEN3; + /* FIXME: R-Car Gen2 has 4byte alignment restriction for tx buffer + * Use two descriptor to handle such situation. First descriptor to + * handle aligned data buffer and second descriptor to handle the + * overflow data because of alignment. + */ + priv->num_tx_desc = info->aligned_tx ? 
2 : 1; /* Set function */ ndev->netdev_ops = &ravb_netdev_ops; @@ -2160,7 +2277,7 @@ static int ravb_probe(struct platform_device *pdev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); - if (priv->chip_id != RCAR_GEN2) { + if (info->internal_delay) { ravb_parse_delay_mode(np, ndev); ravb_set_delay_mode(ndev); } @@ -2184,7 +2301,7 @@ static int ravb_probe(struct platform_device *pdev) INIT_LIST_HEAD(&priv->ts_skb_list); /* Initialise PTP Clock driver */ - if (chip_id != RCAR_GEN2) + if (info->ptp_cfg_active) ravb_ptp_init(ndev, pdev); /* Debug message level */ @@ -2232,7 +2349,7 @@ out_dma_free: priv->desc_bat_dma); /* Stop PTP Clock driver */ - if (chip_id != RCAR_GEN2) + if (info->ptp_cfg_active) ravb_ptp_stop(ndev); out_disable_refclk: clk_disable_unprepare(priv->refclk); @@ -2241,6 +2358,7 @@ out_release: pm_runtime_put(&pdev->dev); pm_runtime_disable(&pdev->dev); + reset_control_assert(rstc); return error; } @@ -2248,9 +2366,10 @@ static int ravb_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; /* Stop PTP Clock driver */ - if (priv->chip_id != RCAR_GEN2) + if (info->ptp_cfg_active) ravb_ptp_stop(ndev); clk_disable_unprepare(priv->refclk); @@ -2265,6 +2384,7 @@ static int ravb_remove(struct platform_device *pdev) netif_napi_del(&priv->napi[RAVB_BE]); ravb_mdio_release(priv); pm_runtime_disable(&pdev->dev); + reset_control_assert(priv->rstc); free_netdev(ndev); platform_set_drvdata(pdev, NULL); @@ -2333,6 +2453,7 @@ static int __maybe_unused ravb_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ravb_private *priv = netdev_priv(ndev); + const struct ravb_hw_info *info = priv->info; int ret = 0; /* If WoL is enabled set reset mode to rearm the WoL logic */ @@ -2355,7 +2476,7 @@ static int __maybe_unused ravb_resume(struct device *dev) /* Request GTI loading */ ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI); - if (priv->chip_id != RCAR_GEN2) + if (info->internal_delay) ravb_set_delay_mode(ndev); /* Restore descriptor base address table */ diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index 6984bd5b7da9..c099656dd75b 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c @@ -179,6 +179,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, { struct ravb_private *priv = container_of(ptp, struct ravb_private, ptp.info); + const struct ravb_hw_info *info = priv->info; struct net_device *ndev = priv->ndev; unsigned long flags; @@ -197,7 +198,7 @@ static int ravb_ptp_extts(struct ptp_clock_info *ptp, priv->ptp.extts[req->index] = on; spin_lock_irqsave(&priv->lock, flags); - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) ravb_modify(ndev, GIC, GIC_PTCE, on ? 
GIC_PTCE : 0); else if (on) ravb_write(ndev, GIE_PTCS, GIE); @@ -213,6 +214,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, { struct ravb_private *priv = container_of(ptp, struct ravb_private, ptp.info); + const struct ravb_hw_info *info = priv->info; struct net_device *ndev = priv->ndev; struct ravb_ptp_perout *perout; unsigned long flags; @@ -252,7 +254,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, error = ravb_ptp_update_compare(priv, (u32)start_ns); if (!error) { /* Unmask interrupt */ - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) ravb_modify(ndev, GIC, GIC_PTME, GIC_PTME); else ravb_write(ndev, GIE_PTMS0, GIE); @@ -264,7 +266,7 @@ static int ravb_ptp_perout(struct ptp_clock_info *ptp, perout->period = 0; /* Mask interrupt */ - if (priv->chip_id == RCAR_GEN2) + if (!info->multi_irqs) ravb_modify(ndev, GIC, GIC_PTME, 0); else ravb_write(ndev, GID_PTMD0, GID); diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index 840478692a37..6c8ba916d1a6 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -3141,7 +3141,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { .ndo_get_stats = sh_eth_get_stats, .ndo_set_rx_mode = sh_eth_set_rx_mode, .ndo_tx_timeout = sh_eth_tx_timeout, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = sh_eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, @@ -3157,7 +3157,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, .ndo_tx_timeout = sh_eth_tx_timeout, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_change_mtu = sh_eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/rocker/rocker.h b/drivers/net/ethernet/rocker/rocker.h index 315a6e5c0f59..e75814a4654f 100644 --- a/drivers/net/ethernet/rocker/rocker.h +++ b/drivers/net/ethernet/rocker/rocker.h @@ -119,7 +119,8 @@ struct rocker_world_ops { int (*port_obj_fdb_del)(struct rocker_port *rocker_port, u16 vid, const unsigned char *addr); int (*port_master_linked)(struct rocker_port *rocker_port, - struct net_device *master); + struct net_device *master, + struct netlink_ext_ack *extack); int (*port_master_unlinked)(struct rocker_port *rocker_port, struct net_device *master); int (*port_neigh_update)(struct rocker_port *rocker_port, diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 1f06b92ee5bb..3364b6a56bd1 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1670,13 +1670,14 @@ rocker_world_port_fdb_del(struct rocker_port *rocker_port, } static int rocker_world_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) + struct net_device *master, + struct netlink_ext_ack *extack) { struct rocker_world_ops *wops = rocker_port->rocker->wops; if (!wops->port_master_linked) return -EOPNOTSUPP; - return wops->port_master_linked(rocker_port, master); + return wops->port_master_linked(rocker_port, master, extack); } static int rocker_world_port_master_unlinked(struct rocker_port *rocker_port, @@ -3107,6 +3108,7 @@ struct rocker_port *rocker_port_dev_lower_find(struct net_device *dev, static int rocker_netdevice_event(struct notifier_block *unused, unsigned 
long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; struct rocker_port *rocker_port; @@ -3123,7 +3125,8 @@ static int rocker_netdevice_event(struct notifier_block *unused, rocker_port = netdev_priv(dev); if (info->linking) { err = rocker_world_port_master_linked(rocker_port, - info->upper_dev); + info->upper_dev, + extack); if (err) netdev_warn(dev, "failed to reflect master linked (err %d)\n", err); diff --git a/drivers/net/ethernet/rocker/rocker_ofdpa.c b/drivers/net/ethernet/rocker/rocker_ofdpa.c index e33a9d283a4e..3e1ca7a8d029 100644 --- a/drivers/net/ethernet/rocker/rocker_ofdpa.c +++ b/drivers/net/ethernet/rocker/rocker_ofdpa.c @@ -2571,8 +2571,10 @@ static int ofdpa_port_obj_fdb_del(struct rocker_port *rocker_port, } static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, - struct net_device *bridge) + struct net_device *bridge, + struct netlink_ext_ack *extack) { + struct net_device *dev = ofdpa_port->dev; int err; /* Port is joining bridge, so the internal VLAN for the @@ -2592,13 +2594,21 @@ static int ofdpa_port_bridge_join(struct ofdpa_port *ofdpa_port, ofdpa_port->bridge_dev = bridge; - return ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + err = ofdpa_port_vlan_add(ofdpa_port, OFDPA_UNTAGGED_VID, 0); + if (err) + return err; + + return switchdev_bridge_port_offload(dev, dev, NULL, NULL, NULL, + false, extack); } static int ofdpa_port_bridge_leave(struct ofdpa_port *ofdpa_port) { + struct net_device *dev = ofdpa_port->dev; int err; + switchdev_bridge_port_unoffload(dev, NULL, NULL, NULL); + err = ofdpa_port_vlan_del(ofdpa_port, OFDPA_UNTAGGED_VID, 0); if (err) return err; @@ -2637,13 +2647,14 @@ static int ofdpa_port_ovs_changed(struct ofdpa_port *ofdpa_port, } static int ofdpa_port_master_linked(struct rocker_port *rocker_port, - struct net_device *master) + struct net_device *master, + struct netlink_ext_ack *extack) { struct ofdpa_port *ofdpa_port = rocker_port->wpriv; int err = 0; if (netif_is_bridge_master(master)) - err = ofdpa_port_bridge_join(ofdpa_port, master); + err = ofdpa_port_bridge_join(ofdpa_port, master, extack); else if (netif_is_ovs_master(master)) err = ofdpa_port_ovs_changed(ofdpa_port, master); return err; diff --git a/drivers/net/ethernet/samsung/Kconfig b/drivers/net/ethernet/samsung/Kconfig index 0582e110b1c0..2a6c2658d284 100644 --- a/drivers/net/ethernet/samsung/Kconfig +++ b/drivers/net/ethernet/samsung/Kconfig @@ -20,9 +20,9 @@ if NET_VENDOR_SAMSUNG config SXGBE_ETH tristate "Samsung 10G/2.5G/1G SXGBE Ethernet driver" depends on HAS_IOMEM && HAS_DMA + depends on PTP_1588_CLOCK_OPTIONAL select PHYLIB select CRC32 - imply PTP_1588_CLOCK help This is the driver for the SXGBE 10G Ethernet IP block found on Samsung platforms. 
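For reference, the sxgbe_ethtool.c hunks below (and the later sfc, falcon, netsec and stmmac ones) all make the same mechanical change: the ethtool coalesce callbacks now take a struct kernel_ethtool_coalesce and a struct netlink_ext_ack in addition to the legacy struct ethtool_coalesce. A minimal sketch of the converted handler shape follows; the "example_" names and the single rx_coalesce_usecs field are illustrative assumptions, not taken from any driver in this diff:

#include <linux/ethtool.h>
#include <linux/netdevice.h>

struct example_priv {			/* hypothetical private state */
	u32 rx_coalesce_usecs;
};

static int example_get_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(dev);

	/* kernel_coal/extack may be left unused by drivers that only
	 * support the classic ethtool_coalesce fields.
	 */
	ec->rx_coalesce_usecs = priv->rx_coalesce_usecs;
	return 0;
}

static int example_set_coalesce(struct net_device *dev,
				struct ethtool_coalesce *ec,
				struct kernel_ethtool_coalesce *kernel_coal,
				struct netlink_ext_ack *extack)
{
	struct example_priv *priv = netdev_priv(dev);

	priv->rx_coalesce_usecs = ec->rx_coalesce_usecs;
	return 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
	.get_coalesce = example_get_coalesce,
	.set_coalesce = example_set_coalesce,
};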
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c index 7f8b10c49660..98edb01024f0 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_ethtool.c @@ -274,7 +274,9 @@ static u32 sxgbe_usec2riwt(u32 usec, struct sxgbe_priv_data *priv) } static int sxgbe_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sxgbe_priv_data *priv = netdev_priv(dev); @@ -285,7 +287,9 @@ static int sxgbe_get_coalesce(struct net_device *dev, } static int sxgbe_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct sxgbe_priv_data *priv = netdev_priv(dev); unsigned int rx_riwt; diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 090bcd2fb758..6781aa636d58 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -1964,7 +1964,7 @@ static const struct net_device_ops sxgbe_netdev_ops = { .ndo_set_features = sxgbe_set_features, .ndo_set_rx_mode = sxgbe_set_rx_mode, .ndo_tx_timeout = sxgbe_tx_timeout, - .ndo_do_ioctl = sxgbe_ioctl, + .ndo_eth_ioctl = sxgbe_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sxgbe_poll_controller, #endif diff --git a/drivers/net/ethernet/sfc/Kconfig b/drivers/net/ethernet/sfc/Kconfig index 5e37c8313725..97ce64079855 100644 --- a/drivers/net/ethernet/sfc/Kconfig +++ b/drivers/net/ethernet/sfc/Kconfig @@ -19,9 +19,9 @@ if NET_VENDOR_SOLARFLARE config SFC tristate "Solarflare SFC9000/SFC9100/EF100-family support" depends on PCI + depends on PTP_1588_CLOCK_OPTIONAL select MDIO select CRC32 - imply PTP_1588_CLOCK help This driver supports 10/40-gigabit Ethernet cards based on the Solarflare SFC9000-family and SFC9100-family controllers. 
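For reference, the other change repeated throughout the hunks below is the move of the PHY/MII ioctl handler from .ndo_do_ioctl to the new .ndo_eth_ioctl member of net_device_ops. A minimal sketch with hypothetical "example_" names; only the net_device_ops member and phy_mii_ioctl() are existing kernel API:

#include <linux/netdevice.h>
#include <linux/phy.h>

/* hypothetical handler; forwards MII ioctls to the attached PHY */
static int example_eth_ioctl(struct net_device *ndev, struct ifreq *ifr,
			     int cmd)
{
	if (!netif_running(ndev) || !ndev->phydev)
		return -EINVAL;

	return phy_mii_ioctl(ndev->phydev, ifr, cmd);
}

static const struct net_device_ops example_netdev_ops = {
	/* was .ndo_do_ioctl before this series */
	.ndo_eth_ioctl	= example_eth_ioctl,
};

Drivers that need no extra bookkeeping simply wire .ndo_eth_ioctl to the phy_do_ioctl_running() helper instead, as several of the hunks below do.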
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 37fcf2eb0741..a295e2621cf3 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -591,7 +591,7 @@ static const struct net_device_ops efx_netdev_ops = { .ndo_tx_timeout = efx_watchdog, .ndo_start_xmit = efx_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = efx_ioctl, + .ndo_eth_ioctl = efx_ioctl, .ndo_change_mtu = efx_change_mtu, .ndo_set_mac_address = efx_set_mac_address, .ndo_set_rx_mode = efx_set_rx_mode, diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 058d9fe41d99..e002ce21788d 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -97,7 +97,9 @@ static void efx_ethtool_get_regs(struct net_device *net_dev, */ static int efx_ethtool_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); unsigned int tx_usecs, rx_usecs; @@ -115,7 +117,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev, } static int efx_ethtool_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct efx_nic *efx = netdev_priv(net_dev); struct efx_channel *channel; diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c index 9ec752a43c75..c177ea0f301e 100644 --- a/drivers/net/ethernet/sfc/falcon/efx.c +++ b/drivers/net/ethernet/sfc/falcon/efx.c @@ -2219,7 +2219,7 @@ static const struct net_device_ops ef4_netdev_ops = { .ndo_tx_timeout = ef4_watchdog, .ndo_start_xmit = ef4_hard_start_xmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = ef4_ioctl, + .ndo_eth_ioctl = ef4_ioctl, .ndo_change_mtu = ef4_change_mtu, .ndo_set_mac_address = ef4_set_mac_address, .ndo_set_rx_mode = ef4_set_rx_mode, diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index a6bae6a234ba..137e8a7aeaa1 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -577,7 +577,9 @@ static int ef4_ethtool_nway_reset(struct net_device *net_dev) */ static int ef4_ethtool_get_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ef4_nic *efx = netdev_priv(net_dev); unsigned int tx_usecs, rx_usecs; @@ -595,7 +597,9 @@ static int ef4_ethtool_get_coalesce(struct net_device *net_dev, } static int ef4_ethtool_set_coalesce(struct net_device *net_dev, - struct ethtool_coalesce *coalesce) + struct ethtool_coalesce *coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct ef4_nic *efx = netdev_priv(net_dev); struct ef4_channel *channel; diff --git a/drivers/net/ethernet/sgi/ioc3-eth.c b/drivers/net/ethernet/sgi/ioc3-eth.c index 2b29fd4cbdf4..062f7844c496 100644 --- a/drivers/net/ethernet/sgi/ioc3-eth.c +++ b/drivers/net/ethernet/sgi/ioc3-eth.c @@ -820,7 +820,7 @@ static const struct net_device_ops ioc3_netdev_ops = { .ndo_tx_timeout = ioc3_timeout, .ndo_get_stats = ioc3_get_stats, .ndo_set_rx_mode = ioc3_set_multicast_list, - .ndo_do_ioctl = ioc3_ioctl, + .ndo_eth_ioctl = ioc3_ioctl, 
.ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = ioc3_set_mac_address, }; diff --git a/drivers/net/ethernet/sgi/meth.c b/drivers/net/ethernet/sgi/meth.c index 0c396ecd3389..efce834d8ee6 100644 --- a/drivers/net/ethernet/sgi/meth.c +++ b/drivers/net/ethernet/sgi/meth.c @@ -812,7 +812,7 @@ static const struct net_device_ops meth_netdev_ops = { .ndo_open = meth_open, .ndo_stop = meth_release, .ndo_start_xmit = meth_tx, - .ndo_do_ioctl = meth_ioctl, + .ndo_eth_ioctl = meth_ioctl, .ndo_tx_timeout = meth_tx_timeout, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/sis/sis190.c b/drivers/net/ethernet/sis/sis190.c index 676b193833c0..3d1a18a01ce5 100644 --- a/drivers/net/ethernet/sis/sis190.c +++ b/drivers/net/ethernet/sis/sis190.c @@ -1841,7 +1841,7 @@ static int sis190_mac_addr(struct net_device *dev, void *p) static const struct net_device_ops sis190_netdev_ops = { .ndo_open = sis190_open, .ndo_stop = sis190_close, - .ndo_do_ioctl = sis190_ioctl, + .ndo_eth_ioctl = sis190_ioctl, .ndo_start_xmit = sis190_start_xmit, .ndo_tx_timeout = sis190_tx_timeout, .ndo_set_rx_mode = sis190_set_rx_mode, diff --git a/drivers/net/ethernet/sis/sis900.c b/drivers/net/ethernet/sis/sis900.c index cff87de9178a..60a0c0e9ded2 100644 --- a/drivers/net/ethernet/sis/sis900.c +++ b/drivers/net/ethernet/sis/sis900.c @@ -404,7 +404,7 @@ static const struct net_device_ops sis900_netdev_ops = { .ndo_set_rx_mode = set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = mii_ioctl, + .ndo_eth_ioctl = mii_ioctl, .ndo_tx_timeout = sis900_tx_timeout, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sis900_poll, diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index c52a38df0e0d..72e42a868346 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -23,6 +23,7 @@ config SMC9194 tristate "SMC 9194 support" depends on ISA select CRC32 + select NETDEV_LEGACY_INIT help This is support for the SMC9xxx based Ethernet cards. 
Choose this option if you have a DELL laptop with the docking station, or diff --git a/drivers/net/ethernet/smsc/epic100.c b/drivers/net/ethernet/smsc/epic100.c index 51cd7dca91cd..44daf79a8f97 100644 --- a/drivers/net/ethernet/smsc/epic100.c +++ b/drivers/net/ethernet/smsc/epic100.c @@ -312,7 +312,7 @@ static const struct net_device_ops epic_netdev_ops = { .ndo_tx_timeout = epic_tx_timeout, .ndo_get_stats = epic_get_stats, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/smsc/smc9194.c b/drivers/net/ethernet/smsc/smc9194.c index bf7c8c8b1350..0ce403fa5f1a 100644 --- a/drivers/net/ethernet/smsc/smc9194.c +++ b/drivers/net/ethernet/smsc/smc9194.c @@ -1508,7 +1508,7 @@ MODULE_PARM_DESC(io, "SMC 99194 I/O base address"); MODULE_PARM_DESC(irq, "SMC 99194 IRQ number"); MODULE_PARM_DESC(ifport, "SMC 99194 interface port (0-default, 1-TP, 2-AUI)"); -int __init init_module(void) +static int __init smc_init_module(void) { if (io == 0) printk(KERN_WARNING @@ -1518,13 +1518,15 @@ int __init init_module(void) devSMC9194 = smc_init(-1); return PTR_ERR_OR_ZERO(devSMC9194); } +module_init(smc_init_module); -void __exit cleanup_module(void) +static void __exit smc_cleanup_module(void) { unregister_netdev(devSMC9194); free_irq(devSMC9194->irq, devSMC9194); release_region(devSMC9194->base_addr, SMC_IO_EXTENT); free_netdev(devSMC9194); } +module_exit(smc_cleanup_module); #endif /* MODULE */ diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c index f2a50eb3c1e0..42fc37c7887a 100644 --- a/drivers/net/ethernet/smsc/smc91c92_cs.c +++ b/drivers/net/ethernet/smsc/smc91c92_cs.c @@ -294,7 +294,7 @@ static const struct net_device_ops smc_netdev_ops = { .ndo_tx_timeout = smc_tx_timeout, .ndo_set_config = s9k_config, .ndo_set_rx_mode = set_rx_mode, - .ndo_do_ioctl = smc_ioctl, + .ndo_eth_ioctl = smc_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c index 556a9790cdcf..199a97339280 100644 --- a/drivers/net/ethernet/smsc/smsc911x.c +++ b/drivers/net/ethernet/smsc/smsc911x.c @@ -2148,7 +2148,7 @@ static const struct net_device_ops smsc911x_netdev_ops = { .ndo_start_xmit = smsc911x_hard_start_xmit, .ndo_get_stats = smsc911x_get_stats, .ndo_set_rx_mode = smsc911x_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = smsc911x_set_mac_address, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c index c1dab009415d..fdbd2a43e267 100644 --- a/drivers/net/ethernet/smsc/smsc9420.c +++ b/drivers/net/ethernet/smsc/smsc9420.c @@ -1482,7 +1482,7 @@ static const struct net_device_ops smsc9420_netdev_ops = { .ndo_start_xmit = smsc9420_hard_start_xmit, .ndo_get_stats = smsc9420_get_stats, .ndo_set_rx_mode = smsc9420_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 20d148c019d8..1f46af136aa8 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ 
b/drivers/net/ethernet/socionext/netsec.c @@ -532,7 +532,9 @@ static void netsec_et_get_drvinfo(struct net_device *net_device, } static int netsec_et_get_coalesce(struct net_device *net_device, - struct ethtool_coalesce *et_coalesce) + struct ethtool_coalesce *et_coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netsec_priv *priv = netdev_priv(net_device); @@ -542,7 +544,9 @@ static int netsec_et_get_coalesce(struct net_device *net_device, } static int netsec_et_set_coalesce(struct net_device *net_device, - struct ethtool_coalesce *et_coalesce) + struct ethtool_coalesce *et_coalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct netsec_priv *priv = netdev_priv(net_device); @@ -1544,7 +1548,7 @@ static int netsec_start_gmac(struct netsec_priv *priv) netsec_write(priv, NETSEC_REG_NRM_RX_INTEN_CLR, ~0); netsec_write(priv, NETSEC_REG_NRM_TX_INTEN_CLR, ~0); - netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce); + netsec_et_set_coalesce(priv->ndev, &priv->et_coalesce, NULL, NULL); if (netsec_mac_write(priv, GMAC_REG_OMR, value)) return -ETIMEDOUT; @@ -1831,7 +1835,7 @@ static const struct net_device_ops netsec_netdev_ops = { .ndo_set_features = netsec_netdev_set_features, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl, + .ndo_eth_ioctl = phy_do_ioctl, .ndo_xdp_xmit = netsec_xdp_xmit, .ndo_bpf = netsec_xdp, }; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 5eb6bb4f7b6c..ae31ed93aaf0 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1543,7 +1543,7 @@ static const struct net_device_ops ave_netdev_ops = { .ndo_open = ave_open, .ndo_stop = ave_stop, .ndo_start_xmit = ave_start_xmit, - .ndo_do_ioctl = ave_ioctl, + .ndo_eth_ioctl = ave_ioctl, .ndo_set_rx_mode = ave_set_rx_mode, .ndo_get_stats64 = ave_get_stats64, .ndo_set_mac_address = ave_set_mac_address, diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index ac3c248d4f9b..929cfc22cd0c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -2,12 +2,12 @@ config STMMAC_ETH tristate "STMicroelectronics Multi-Gigabit Ethernet driver" depends on HAS_IOMEM && HAS_DMA + depends on PTP_1588_CLOCK_OPTIONAL select MII select PCS_XPCS select PAGE_POOL select PHYLINK select CRC32 - imply PTP_1588_CLOCK select RESET_CONTROLLER help This is the driver for the Ethernet IPs built around a diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 5fecc83f175b..b6d945ea903d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -58,6 +58,16 @@ #undef FRAME_FILTER_DEBUG /* #define FRAME_FILTER_DEBUG */ +struct stmmac_txq_stats { + unsigned long tx_pkt_n; + unsigned long tx_normal_irq_n; +}; + +struct stmmac_rxq_stats { + unsigned long rx_pkt_n; + unsigned long rx_normal_irq_n; +}; + /* Extra statistic and debug information exposed by ethtool */ struct stmmac_extra_stats { /* Transmit errors */ @@ -189,6 +199,9 @@ struct stmmac_extra_stats { unsigned long mtl_est_hlbf; unsigned long mtl_est_btre; unsigned long mtl_est_btrlm; + /* per queue statistics */ + struct stmmac_txq_stats txq_stats[MTL_MAX_TX_QUEUES]; + struct stmmac_rxq_stats rxq_stats[MTL_MAX_RX_QUEUES]; }; /* Safety Feature 
statistics exposed by ethtool */ diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c index 28dd0ed85a82..f7dc8458cde8 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c @@ -289,10 +289,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) val &= ~NSS_COMMON_GMAC_CTL_PHY_IFACE_SEL; break; default: - dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", - phy_modes(gmac->phy_mode)); - err = -EINVAL; - goto err_remove_config_dt; + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_GMAC_CTL(gmac->id), val); @@ -309,10 +306,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); break; default: - dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", - phy_modes(gmac->phy_mode)); - err = -EINVAL; - goto err_remove_config_dt; + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, val); @@ -329,8 +323,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) NSS_COMMON_CLK_GATE_GMII_TX_EN(gmac->id); break; default: - /* We don't get here; the switch above will have errored out */ - unreachable(); + goto err_unsupported_phy; } regmap_write(gmac->nss_common, NSS_COMMON_CLK_GATE, val); @@ -361,6 +354,11 @@ static int ipq806x_gmac_probe(struct platform_device *pdev) return 0; +err_unsupported_phy: + dev_err(&pdev->dev, "Unsupported PHY mode: \"%s\"\n", + phy_modes(gmac->phy_mode)); + err = -EINVAL; + err_remove_config_dt: stmmac_remove_config_dt(pdev, plat_dat); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index 280ac0129572..ed817011a94a 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -21,7 +21,6 @@ #include <linux/delay.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> -#include <linux/pm_runtime.h> #include "stmmac_platform.h" @@ -1529,9 +1528,6 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) return ret; } - pm_runtime_enable(dev); - pm_runtime_get_sync(dev); - if (bsp_priv->integrated_phy) rk_gmac_integrated_phy_powerup(bsp_priv); @@ -1540,14 +1536,9 @@ static int rk_gmac_powerup(struct rk_priv_data *bsp_priv) static void rk_gmac_powerdown(struct rk_priv_data *gmac) { - struct device *dev = &gmac->pdev->dev; - if (gmac->integrated_phy) rk_gmac_integrated_phy_powerdown(gmac); - pm_runtime_put_sync(dev); - pm_runtime_disable(dev); - phy_power_on(gmac, false); gmac_clk_enable(gmac, false); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index e63270267578..9292a1fab7d3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -170,13 +170,16 @@ int dwmac4_dma_interrupt(void __iomem *ioaddr, x->normal_irq_n++; if (likely(intr_status & DMA_CHAN_STATUS_RI)) { x->rx_normal_irq_n++; + x->rxq_stats[chan].rx_normal_irq_n++; ret |= handle_rx; } - if (likely(intr_status & (DMA_CHAN_STATUS_TI | - DMA_CHAN_STATUS_TBU))) { + if (likely(intr_status & DMA_CHAN_STATUS_TI)) { x->tx_normal_irq_n++; + x->txq_stats[chan].tx_normal_irq_n++; ret |= handle_tx; } + if (unlikely(intr_status & DMA_CHAN_STATUS_TBU)) + ret |= handle_tx; if (unlikely(intr_status & DMA_CHAN_STATUS_ERI)) x->rx_early_irq++; diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h 
b/drivers/net/ethernet/stmicro/stmmac/stmmac.h index fcdb1d20389b..43eead726886 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h @@ -339,9 +339,9 @@ static inline bool stmmac_xdp_is_enabled(struct stmmac_priv *priv) static inline unsigned int stmmac_rx_offset(struct stmmac_priv *priv) { if (stmmac_xdp_is_enabled(priv)) - return XDP_PACKET_HEADROOM + NET_IP_ALIGN; + return XDP_PACKET_HEADROOM; - return NET_SKB_PAD + NET_IP_ALIGN; + return 0; } void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index d0ce608b81c3..d89455803bed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -261,6 +261,18 @@ static const struct stmmac_stats stmmac_mmc[] = { }; #define STMMAC_MMC_STATS_LEN ARRAY_SIZE(stmmac_mmc) +static const char stmmac_qstats_tx_string[][ETH_GSTRING_LEN] = { + "tx_pkt_n", + "tx_irq_n", +#define STMMAC_TXQ_STATS ARRAY_SIZE(stmmac_qstats_tx_string) +}; + +static const char stmmac_qstats_rx_string[][ETH_GSTRING_LEN] = { + "rx_pkt_n", + "rx_irq_n", +#define STMMAC_RXQ_STATS ARRAY_SIZE(stmmac_qstats_rx_string) +}; + static void stmmac_ethtool_getdrvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { @@ -510,6 +522,31 @@ stmmac_set_pauseparam(struct net_device *netdev, } } +static void stmmac_get_per_qstats(struct stmmac_priv *priv, u64 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + char *p; + + for (q = 0; q < tx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.txq_stats[q].tx_pkt_n); + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + *data++ = (*(u64 *)p); + p += sizeof(u64 *); + } + } + for (q = 0; q < rx_cnt; q++) { + p = (char *)priv + offsetof(struct stmmac_priv, + xstats.rxq_stats[q].rx_pkt_n); + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + *data++ = (*(u64 *)p); + p += sizeof(u64 *); + } + } +} + static void stmmac_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *dummy, u64 *data) { @@ -560,16 +597,21 @@ static void stmmac_get_ethtool_stats(struct net_device *dev, data[j++] = (stmmac_gstrings_stats[i].sizeof_stat == sizeof(u64)) ? 
(*(u64 *)p) : (*(u32 *)p); } + stmmac_get_per_qstats(priv, &data[j]); } static int stmmac_get_sset_count(struct net_device *netdev, int sset) { struct stmmac_priv *priv = netdev_priv(netdev); + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; int i, len, safety_len = 0; switch (sset) { case ETH_SS_STATS: - len = STMMAC_STATS_LEN; + len = STMMAC_STATS_LEN + + STMMAC_TXQ_STATS * tx_cnt + + STMMAC_RXQ_STATS * rx_cnt; if (priv->dma_cap.rmon) len += STMMAC_MMC_STATS_LEN; @@ -592,6 +634,28 @@ static int stmmac_get_sset_count(struct net_device *netdev, int sset) } } +static void stmmac_get_qstats_string(struct stmmac_priv *priv, u8 *data) +{ + u32 tx_cnt = priv->plat->tx_queues_to_use; + u32 rx_cnt = priv->plat->rx_queues_to_use; + int q, stat; + + for (q = 0; q < tx_cnt; q++) { + for (stat = 0; stat < STMMAC_TXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_tx_string[stat]); + data += ETH_GSTRING_LEN; + } + } + for (q = 0; q < rx_cnt; q++) { + for (stat = 0; stat < STMMAC_RXQ_STATS; stat++) { + snprintf(data, ETH_GSTRING_LEN, "q%d_%s", q, + stmmac_qstats_rx_string[stat]); + data += ETH_GSTRING_LEN; + } + } +} + static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) { int i; @@ -622,6 +686,7 @@ static void stmmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + stmmac_get_qstats_string(priv, p); break; case ETH_SS_TEST: stmmac_selftest_get_strings(priv, p); @@ -809,7 +874,9 @@ static int __stmmac_get_coalesce(struct net_device *dev, } static int stmmac_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __stmmac_get_coalesce(dev, ec, -1); } @@ -893,7 +960,9 @@ static int __stmmac_set_coalesce(struct net_device *dev, } static int stmmac_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ec) + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { return __stmmac_set_coalesce(dev, ec, -1); } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 7b8404a21544..ed0cd3920171 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -2500,6 +2500,7 @@ static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue) } else { priv->dev->stats.tx_packets++; priv->xstats.tx_pkt_n++; + priv->xstats.txq_stats[queue].tx_pkt_n++; } if (skb) stmmac_get_tx_hwtstamp(priv, p, skb); @@ -4914,6 +4915,10 @@ read_again: prefetch(np); + /* Ensure a valid XSK buffer before proceed */ + if (!buf->xdp) + break; + if (priv->extend_desc) stmmac_rx_extended_status(priv, &priv->dev->stats, &priv->xstats, @@ -4934,10 +4939,6 @@ read_again: continue; } - /* Ensure a valid XSK buffer before proceed */ - if (!buf->xdp) - break; - /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ if (likely(status & rx_not_ls)) { xsk_buff_free(buf->xdp); @@ -5000,6 +5001,9 @@ read_again: stmmac_finalize_xdp_rx(priv, xdp_status); + priv->xstats.rx_pkt_n += count; + priv->xstats.rxq_stats[queue].rx_pkt_n += count; + if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { if (failure || stmmac_rx_dirty(priv, queue) > 0) xsk_set_rx_need_wakeup(rx_q->xsk_pool); @@ -5287,6 +5291,7 @@ drain_data: stmmac_rx_refill(priv, queue); priv->xstats.rx_pkt_n += count; + 
priv->xstats.rxq_stats[queue].rx_pkt_n += count; return count; } @@ -6451,7 +6456,7 @@ static const struct net_device_ops stmmac_netdev_ops = { .ndo_set_features = stmmac_set_features, .ndo_set_rx_mode = stmmac_set_rx_mode, .ndo_tx_timeout = stmmac_tx_timeout, - .ndo_do_ioctl = stmmac_ioctl, + .ndo_eth_ioctl = stmmac_ioctl, .ndo_setup_tc = stmmac_setup_tc, .ndo_select_queue = stmmac_select_queue, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c index 4f3b6437b114..8160087ee92f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c @@ -884,11 +884,13 @@ static int tc_setup_taprio(struct stmmac_priv *priv, return 0; disable: - mutex_lock(&priv->plat->est->lock); - priv->plat->est->enable = false; - stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, - priv->plat->clk_ptp_rate); - mutex_unlock(&priv->plat->est->lock); + if (priv->plat->est) { + mutex_lock(&priv->plat->est->lock); + priv->plat->est->enable = false; + stmmac_est_configure(priv, priv->ioaddr, priv->plat->est, + priv->plat->clk_ptp_rate); + mutex_unlock(&priv->plat->est->lock); + } priv->plat->fpe_cfg->enable = false; stmmac_fpe_configure(priv, priv->ioaddr, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c index 105821b53020..2a616c6f7cd0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_xdp.c @@ -34,18 +34,18 @@ static int stmmac_xdp_enable_pool(struct stmmac_priv *priv, need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); if (need_update) { - stmmac_disable_rx_queue(priv, queue); - stmmac_disable_tx_queue(priv, queue); napi_disable(&ch->rx_napi); napi_disable(&ch->tx_napi); + stmmac_disable_rx_queue(priv, queue); + stmmac_disable_tx_queue(priv, queue); } set_bit(queue, priv->af_xdp_zc_qps); if (need_update) { - napi_enable(&ch->rxtx_napi); stmmac_enable_rx_queue(priv, queue); stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rxtx_napi); err = stmmac_xsk_wakeup(priv->dev, queue, XDP_WAKEUP_RX); if (err) @@ -72,10 +72,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) need_update = netif_running(priv->dev) && stmmac_xdp_is_enabled(priv); if (need_update) { + napi_disable(&ch->rxtx_napi); stmmac_disable_rx_queue(priv, queue); stmmac_disable_tx_queue(priv, queue); synchronize_rcu(); - napi_disable(&ch->rxtx_napi); } xsk_pool_dma_unmap(pool, STMMAC_RX_DMA_ATTR); @@ -83,10 +83,10 @@ static int stmmac_xdp_disable_pool(struct stmmac_priv *priv, u16 queue) clear_bit(queue, priv->af_xdp_zc_qps); if (need_update) { - napi_enable(&ch->rx_napi); - napi_enable(&ch->tx_napi); stmmac_enable_rx_queue(priv, queue); stmmac_enable_tx_queue(priv, queue); + napi_enable(&ch->rx_napi); + napi_enable(&ch->tx_napi); } return 0; diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c index 981685c88308..287ae4c538aa 100644 --- a/drivers/net/ethernet/sun/cassini.c +++ b/drivers/net/ethernet/sun/cassini.c @@ -4876,7 +4876,7 @@ static const struct net_device_ops cas_netdev_ops = { .ndo_start_xmit = cas_start_xmit, .ndo_get_stats = cas_get_stats, .ndo_set_rx_mode = cas_set_multicast, - .ndo_do_ioctl = cas_ioctl, + .ndo_eth_ioctl = cas_ioctl, .ndo_tx_timeout = cas_tx_timeout, .ndo_change_mtu = cas_change_mtu, .ndo_set_mac_address = eth_mac_addr, diff --git a/drivers/net/ethernet/sun/niu.c 
b/drivers/net/ethernet/sun/niu.c index 860644d182ab..a68a01d1b2b1 100644 --- a/drivers/net/ethernet/sun/niu.c +++ b/drivers/net/ethernet/sun/niu.c @@ -9208,7 +9208,7 @@ static int niu_get_of_props(struct niu *np) else dp = pci_device_to_OF_node(np->pdev); - phy_type = of_get_property(dp, "phy-type", &prop_len); + phy_type = of_get_property(dp, "phy-type", NULL); if (!phy_type) { netdev_err(dev, "%pOF: OF node lacks phy-type property\n", dp); return -EINVAL; @@ -9242,12 +9242,12 @@ static int niu_get_of_props(struct niu *np) return -EINVAL; } - model = of_get_property(dp, "model", &prop_len); + model = of_get_property(dp, "model", NULL); if (model) strcpy(np->vpd.model, model); - if (of_find_property(dp, "hot-swappable-phy", &prop_len)) { + if (of_find_property(dp, "hot-swappable-phy", NULL)) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } @@ -9668,7 +9668,7 @@ static const struct net_device_ops niu_netdev_ops = { .ndo_set_rx_mode = niu_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = niu_set_mac_addr, - .ndo_do_ioctl = niu_ioctl, + .ndo_eth_ioctl = niu_ioctl, .ndo_tx_timeout = niu_tx_timeout, .ndo_change_mtu = niu_change_mtu, }; @@ -9722,7 +9722,6 @@ static int niu_pci_init_one(struct pci_dev *pdev, struct net_device *dev; struct niu *np; int err; - u64 dma_mask; niu_driver_version(); @@ -9777,18 +9776,11 @@ static int niu_pci_init_one(struct pci_dev *pdev, PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_RELAX_EN); - dma_mask = DMA_BIT_MASK(44); - err = pci_set_dma_mask(pdev, dma_mask); - if (!err) { + err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44)); + if (!err) dev->features |= NETIF_F_HIGHDMA; - err = pci_set_consistent_dma_mask(pdev, dma_mask); - if (err) { - dev_err(&pdev->dev, "Unable to obtain 44 bit DMA for consistent allocations, aborting\n"); - goto err_out_release_parent; - } - } if (err) { - err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, aborting\n"); goto err_out_release_parent; diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c index cfb9e21b18b7..d72018a60c0f 100644 --- a/drivers/net/ethernet/sun/sungem.c +++ b/drivers/net/ethernet/sun/sungem.c @@ -2831,7 +2831,7 @@ static const struct net_device_ops gem_netdev_ops = { .ndo_start_xmit = gem_start_xmit, .ndo_get_stats = gem_get_stats, .ndo_set_rx_mode = gem_set_multicast, - .ndo_do_ioctl = gem_ioctl, + .ndo_eth_ioctl = gem_ioctl, .ndo_tx_timeout = gem_tx_timeout, .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index a2c1a404c52d..62f81b0d14ed 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -251,14 +251,6 @@ static u32 pci_hme_read_desc32(hme32 *p) ((__hp)->write_txd((__txd), (__flags), (__addr))) #define hme_read_desc32(__hp, __p) \ ((__hp)->read_desc32(__p)) -#define hme_dma_map(__hp, __ptr, __size, __dir) \ - ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir))) -#define hme_dma_unmap(__hp, __addr, __size, __dir) \ - ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir))) -#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ - ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))) -#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ - ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), 
(__dir))) #else #ifdef CONFIG_SBUS /* SBUS only compilation */ @@ -277,14 +269,6 @@ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) -#define hme_dma_map(__hp, __ptr, __size, __dir) \ - dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) -#define hme_dma_unmap(__hp, __addr, __size, __dir) \ - dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ - dma_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ - dma_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) #else /* PCI only compilation */ #define hme_write32(__hp, __reg, __val) \ @@ -305,14 +289,6 @@ static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) { return le32_to_cpup((__le32 *)p); } -#define hme_dma_map(__hp, __ptr, __size, __dir) \ - pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) -#define hme_dma_unmap(__hp, __addr, __size, __dir) \ - pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ - pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) -#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ - pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) #endif #endif diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c index bc198eadfcab..49f8c6be9459 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-ethtool.c @@ -146,8 +146,11 @@ static void xlgmac_ethtool_get_channels(struct net_device *netdev, channel->tx_count = pdata->tx_q_count; } -static int xlgmac_ethtool_get_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int +xlgmac_ethtool_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xlgmac_pdata *pdata = netdev_priv(netdev); @@ -158,8 +161,11 @@ static int xlgmac_ethtool_get_coalesce(struct net_device *netdev, return 0; } -static int xlgmac_ethtool_set_coalesce(struct net_device *netdev, - struct ethtool_coalesce *ec) +static int +xlgmac_ethtool_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct xlgmac_pdata *pdata = netdev_priv(netdev); struct xlgmac_hw_ops *hw_ops = &pdata->hw_ops; diff --git a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c index 26d178f8616b..1db7104fef3a 100644 --- a/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c +++ b/drivers/net/ethernet/synopsys/dwc-xlgmac-net.c @@ -933,7 +933,7 @@ static const struct net_device_ops xlgmac_netdev_ops = { .ndo_change_mtu = xlgmac_change_mtu, .ndo_set_mac_address = xlgmac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = xlgmac_ioctl, + .ndo_eth_ioctl = xlgmac_ioctl, .ndo_vlan_rx_add_vid = xlgmac_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = xlgmac_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c index d054c6e83b1c..6b409f9c5863 100644 --- a/drivers/net/ethernet/tehuti/tehuti.c +++ 
b/drivers/net/ethernet/tehuti/tehuti.c @@ -637,7 +637,8 @@ static int bdx_range_check(struct bdx_priv *priv, u32 offset) -EINVAL : 0; } -static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) +static int bdx_siocdevprivate(struct net_device *ndev, struct ifreq *ifr, + void __user *udata, int cmd) { struct bdx_priv *priv = netdev_priv(ndev); u32 data[3]; @@ -647,7 +648,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) DBG("jiffies=%ld cmd=%d\n", jiffies, cmd); if (cmd != SIOCDEVPRIVATE) { - error = copy_from_user(data, ifr->ifr_data, sizeof(data)); + error = copy_from_user(data, udata, sizeof(data)); if (error) { pr_err("can't copy from user\n"); RET(-EFAULT); @@ -669,7 +670,7 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) data[2] = READ_REG(priv, data[1]); DBG("read_reg(0x%x)=0x%x (dec %d)\n", data[1], data[2], data[2]); - error = copy_to_user(ifr->ifr_data, data, sizeof(data)); + error = copy_to_user(udata, data, sizeof(data)); if (error) RET(-EFAULT); break; @@ -688,15 +689,6 @@ static int bdx_ioctl_priv(struct net_device *ndev, struct ifreq *ifr, int cmd) return 0; } -static int bdx_ioctl(struct net_device *ndev, struct ifreq *ifr, int cmd) -{ - ENTER; - if (cmd >= SIOCDEVPRIVATE && cmd <= (SIOCDEVPRIVATE + 15)) - RET(bdx_ioctl_priv(ndev, ifr, cmd)); - else - RET(-EOPNOTSUPP); -} - /** * __bdx_vlan_rx_vid - private helper for adding/killing VLAN vid * @ndev: network device @@ -1860,7 +1852,7 @@ static const struct net_device_ops bdx_netdev_ops = { .ndo_stop = bdx_close, .ndo_start_xmit = bdx_tx_transmit, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = bdx_ioctl, + .ndo_siocdevprivate = bdx_siocdevprivate, .ndo_set_rx_mode = bdx_setmulti, .ndo_change_mtu = bdx_change_mtu, .ndo_set_mac_address = bdx_set_mac, @@ -2159,8 +2151,10 @@ bdx_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) * @netdev * @ecoal */ -static int -bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +static int bdx_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { u32 rdintcm; u32 tdintcm; @@ -2188,8 +2182,10 @@ bdx_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) * @netdev * @ecoal */ -static int -bdx_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ecoal) +static int bdx_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ecoal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { u32 rdintcm; u32 tdintcm; diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 67a08cbba859..130346f74ee8 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -27,6 +27,7 @@ #include <linux/sys_soc.h> #include <linux/dma/ti-cppi5.h> #include <linux/dma/k3-udma-glue.h> +#include <net/switchdev.h> #include "cpsw_ale.h" #include "cpsw_sl.h" @@ -518,6 +519,10 @@ static int am65_cpsw_nuss_common_open(struct am65_cpsw_common *common, } napi_enable(&common->napi_rx); + if (common->rx_irq_disabled) { + common->rx_irq_disabled = false; + enable_irq(common->rx_chns.irq); + } dev_dbg(common->dev, "cpsw_nuss started\n"); return 0; @@ -871,8 +876,12 @@ static int am65_cpsw_nuss_rx_poll(struct napi_struct *napi_rx, int budget) dev_dbg(common->dev, "%s num_rx:%d %d\n", __func__, num_rx, budget); - if (num_rx < budget && 
napi_complete_done(napi_rx, num_rx)) - enable_irq(common->rx_chns.irq); + if (num_rx < budget && napi_complete_done(napi_rx, num_rx)) { + if (common->rx_irq_disabled) { + common->rx_irq_disabled = false; + enable_irq(common->rx_chns.irq); + } + } return num_rx; } @@ -1077,19 +1086,20 @@ static int am65_cpsw_nuss_tx_poll(struct napi_struct *napi_tx, int budget) else num_tx = am65_cpsw_nuss_tx_compl_packets(tx_chn->common, tx_chn->id, budget); - num_tx = min(num_tx, budget); - if (num_tx < budget) { - napi_complete(napi_tx); + if (num_tx >= budget) + return budget; + + if (napi_complete_done(napi_tx, num_tx)) enable_irq(tx_chn->irq); - } - return num_tx; + return 0; } static irqreturn_t am65_cpsw_nuss_rx_irq(int irq, void *dev_id) { struct am65_cpsw_common *common = dev_id; + common->rx_irq_disabled = true; disable_irq_nosync(irq); napi_schedule(&common->napi_rx); @@ -1479,7 +1489,7 @@ static const struct net_device_ops am65_cpsw_nuss_netdev_ops = { .ndo_tx_timeout = am65_cpsw_nuss_ndo_host_tx_timeout, .ndo_vlan_rx_add_vid = am65_cpsw_nuss_ndo_slave_add_vid, .ndo_vlan_rx_kill_vid = am65_cpsw_nuss_ndo_slave_kill_vid, - .ndo_do_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, + .ndo_eth_ioctl = am65_cpsw_nuss_ndo_slave_ioctl, .ndo_setup_tc = am65_cpsw_qos_ndo_setup_tc, .ndo_get_devlink_port = am65_cpsw_ndo_get_devlink_port, }; @@ -2081,10 +2091,13 @@ bool am65_cpsw_port_dev_check(const struct net_device *ndev) return false; } -static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_device *br_ndev) +static int am65_cpsw_netdevice_port_link(struct net_device *ndev, + struct net_device *br_ndev, + struct netlink_ext_ack *extack) { struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); + int err; if (!common->br_members) { common->hw_bridge_dev = br_ndev; @@ -2096,6 +2109,11 @@ static int am65_cpsw_netdevice_port_link(struct net_device *ndev, struct net_dev return -EOPNOTSUPP; } + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + return err; + common->br_members |= BIT(priv->port->port_id); am65_cpsw_port_offload_fwd_mark_update(common); @@ -2108,6 +2126,8 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) struct am65_cpsw_common *common = am65_ndev_to_common(ndev); struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev); + switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); + common->br_members &= ~BIT(priv->port->port_id); am65_cpsw_port_offload_fwd_mark_update(common); @@ -2120,6 +2140,7 @@ static void am65_cpsw_netdevice_port_unlink(struct net_device *ndev) static int am65_cpsw_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; int ret = NOTIFY_DONE; @@ -2133,7 +2154,9 @@ static int am65_cpsw_netdevice_event(struct notifier_block *unused, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) - ret = am65_cpsw_netdevice_port_link(ndev, info->upper_dev); + ret = am65_cpsw_netdevice_port_link(ndev, + info->upper_dev, + extack); else am65_cpsw_netdevice_port_unlink(ndev); } @@ -2388,21 +2411,6 @@ static const struct devlink_param am65_cpsw_devlink_params[] = { am65_cpsw_dl_switch_mode_set, NULL), }; -static void am65_cpsw_unregister_devlink_ports(struct am65_cpsw_common *common) -{ - struct devlink_port *dl_port; - struct 
am65_cpsw_port *port; - int i; - - for (i = 1; i <= common->port_num; i++) { - port = am65_common_get_port(common, i); - dl_port = &port->devlink_port; - - if (dl_port->registered) - devlink_port_unregister(dl_port); - } -} - static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) { struct devlink_port_attrs attrs = {}; @@ -2414,14 +2422,14 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) int i; common->devlink = - devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv)); + devlink_alloc(&am65_cpsw_devlink_ops, sizeof(*dl_priv), dev); if (!common->devlink) return -ENOMEM; dl_priv = devlink_priv(common->devlink); dl_priv->common = common; - ret = devlink_register(common->devlink, dev); + ret = devlink_register(common->devlink); if (ret) { dev_err(dev, "devlink reg fail ret:%d\n", ret); goto dl_free; @@ -2464,7 +2472,12 @@ static int am65_cpsw_nuss_register_devlink(struct am65_cpsw_common *common) return ret; dl_port_unreg: - am65_cpsw_unregister_devlink_ports(common); + for (i = i - 1; i >= 1; i--) { + port = am65_common_get_port(common, i); + dl_port = &port->devlink_port; + + devlink_port_unregister(dl_port); + } dl_unreg: devlink_unregister(common->devlink); dl_free: @@ -2475,6 +2488,17 @@ dl_free: static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) { + struct devlink_port *dl_port; + struct am65_cpsw_port *port; + int i; + + for (i = 1; i <= common->port_num; i++) { + port = am65_common_get_port(common, i); + dl_port = &port->devlink_port; + + devlink_port_unregister(dl_port); + } + if (!AM65_CPSW_IS_CPSW2G(common) && IS_ENABLED(CONFIG_TI_K3_AM65_CPSW_SWITCHDEV)) { devlink_params_unpublish(common->devlink); @@ -2482,7 +2506,6 @@ static void am65_cpsw_unregister_devlink(struct am65_cpsw_common *common) ARRAY_SIZE(am65_cpsw_devlink_params)); } - am65_cpsw_unregister_devlink_ports(common); devlink_unregister(common->devlink); devlink_free(common->devlink); } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.h b/drivers/net/ethernet/ti/am65-cpsw-nuss.h index 5d93e346f05e..048ed10143c1 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.h +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.h @@ -126,6 +126,8 @@ struct am65_cpsw_common { struct am65_cpsw_rx_chn rx_chns; struct napi_struct napi_rx; + bool rx_irq_disabled; + u32 nuss_ver; u32 cpsw_ver; unsigned long bus_freq; diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index c20715107075..02d4e51f7306 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -1044,7 +1044,7 @@ static const struct net_device_ops cpmac_netdev_ops = { .ndo_start_xmit = cpmac_start_xmit, .ndo_tx_timeout = cpmac_tx_timeout, .ndo_set_rx_mode = cpmac_set_multicast_list, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, }; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index cbbd0f665796..66f7ddd9b1f9 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -431,7 +431,7 @@ static void cpsw_rx_handler(void *token, int len, int status) skb->protocol = eth_type_trans(skb, ndev); /* mark skb for recycling */ - skb_mark_for_recycle(skb, page, pool); + skb_mark_for_recycle(skb); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -845,7 +845,7 @@ static int cpsw_ndo_open(struct net_device *ndev) struct ethtool_coalesce coal; coal.rx_coalesce_usecs = cpsw->coal_intvl; - 
cpsw_set_coalesce(ndev, &coal); + cpsw_set_coalesce(ndev, &coal, NULL, NULL); } cpdma_ctlr_start(cpsw->dma); @@ -905,7 +905,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, struct cpdma_chan *txch; int ret, q_idx; - if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) { + if (skb_put_padto(skb, CPSW_MIN_PACKET_SIZE)) { cpsw_err(priv, tx_err, "packet pad failed\n"); ndev->stats.tx_dropped++; return NET_XMIT_DROP; @@ -1159,7 +1159,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_set_mac_address = cpsw_ndo_set_mac_address, - .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_eth_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = cpsw_ndo_tx_timeout, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, diff --git a/drivers/net/ethernet/ti/cpsw_ethtool.c b/drivers/net/ethernet/ti/cpsw_ethtool.c index 4619c3a950b0..158c8d3793f4 100644 --- a/drivers/net/ethernet/ti/cpsw_ethtool.c +++ b/drivers/net/ethernet/ti/cpsw_ethtool.c @@ -152,7 +152,9 @@ void cpsw_set_msglevel(struct net_device *ndev, u32 value) priv->msg_enable = value; } -int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) +int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -160,7 +162,9 @@ int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) return 0; } -int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal) +int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct cpsw_priv *priv = netdev_priv(ndev); u32 int_ctrl; diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index d1d02001cef6..7968f24d99c8 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -28,6 +28,7 @@ #include <linux/kmemleak.h> #include <linux/sys_soc.h> +#include <net/switchdev.h> #include <net/page_pool.h> #include <net/pkt_cls.h> #include <net/devlink.h> @@ -374,7 +375,7 @@ static void cpsw_rx_handler(void *token, int len, int status) skb->protocol = eth_type_trans(skb, ndev); /* mark skb for recycling */ - skb_mark_for_recycle(skb, page, pool); + skb_mark_for_recycle(skb); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -501,7 +502,7 @@ static void cpsw_restore(struct cpsw_priv *priv) static void cpsw_init_stp_ale_entry(struct cpsw_common *cpsw) { - char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; + static const char stpa[] = {0x01, 0x80, 0xc2, 0x0, 0x0, 0x0}; cpsw_ale_add_mcast(cpsw->ale, stpa, ALE_PORT_HOST, ALE_SUPER, 0, @@ -893,7 +894,7 @@ static int cpsw_ndo_open(struct net_device *ndev) struct ethtool_coalesce coal; coal.rx_coalesce_usecs = cpsw->coal_intvl; - cpsw_set_coalesce(ndev, &coal); + cpsw_set_coalesce(ndev, &coal, NULL, NULL); } cpdma_ctlr_start(cpsw->dma); @@ -1127,7 +1128,7 @@ static const struct net_device_ops cpsw_netdev_ops = { .ndo_stop = cpsw_ndo_stop, .ndo_start_xmit = cpsw_ndo_start_xmit, .ndo_set_mac_address = cpsw_ndo_set_mac_address, - .ndo_do_ioctl = cpsw_ndo_ioctl, + .ndo_eth_ioctl = cpsw_ndo_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = cpsw_ndo_tx_timeout, .ndo_set_rx_mode = cpsw_ndo_set_rx_mode, @@ -1500,10 +1501,12 @@ static void cpsw_port_offload_fwd_mark_update(struct cpsw_common *cpsw) } static int 
cpsw_netdevice_port_link(struct net_device *ndev, - struct net_device *br_ndev) + struct net_device *br_ndev, + struct netlink_ext_ack *extack) { struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; + int err; if (!cpsw->br_members) { cpsw->hw_bridge_dev = br_ndev; @@ -1515,6 +1518,11 @@ static int cpsw_netdevice_port_link(struct net_device *ndev, return -EOPNOTSUPP; } + err = switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL, + false, extack); + if (err) + return err; + cpsw->br_members |= BIT(priv->emac_port); cpsw_port_offload_fwd_mark_update(cpsw); @@ -1527,6 +1535,8 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev) struct cpsw_priv *priv = netdev_priv(ndev); struct cpsw_common *cpsw = priv->cpsw; + switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL); + cpsw->br_members &= ~BIT(priv->emac_port); cpsw_port_offload_fwd_mark_update(cpsw); @@ -1539,6 +1549,7 @@ static void cpsw_netdevice_port_unlink(struct net_device *ndev) static int cpsw_netdevice_event(struct notifier_block *unused, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); struct net_device *ndev = netdev_notifier_info_to_dev(ptr); struct netdev_notifier_changeupper_info *info; int ret = NOTIFY_DONE; @@ -1553,7 +1564,8 @@ static int cpsw_netdevice_event(struct notifier_block *unused, if (netif_is_bridge_master(info->upper_dev)) { if (info->linking) ret = cpsw_netdevice_port_link(ndev, - info->upper_dev); + info->upper_dev, + extack); else cpsw_netdevice_port_unlink(ndev); } @@ -1791,14 +1803,14 @@ static int cpsw_register_devlink(struct cpsw_common *cpsw) struct cpsw_devlink *dl_priv; int ret = 0; - cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv)); + cpsw->devlink = devlink_alloc(&cpsw_devlink_ops, sizeof(*dl_priv), dev); if (!cpsw->devlink) return -ENOMEM; dl_priv = devlink_priv(cpsw->devlink); dl_priv->cpsw = cpsw; - ret = devlink_register(cpsw->devlink, dev); + ret = devlink_register(cpsw->devlink); if (ret) { dev_err(dev, "DL reg fail ret:%d\n", ret); goto dl_free; diff --git a/drivers/net/ethernet/ti/cpsw_priv.h b/drivers/net/ethernet/ti/cpsw_priv.h index 2951fb7b9dae..435668ee542d 100644 --- a/drivers/net/ethernet/ti/cpsw_priv.h +++ b/drivers/net/ethernet/ti/cpsw_priv.h @@ -464,8 +464,12 @@ void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv); /* ethtool */ u32 cpsw_get_msglevel(struct net_device *ndev); void cpsw_set_msglevel(struct net_device *ndev, u32 value); -int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal); -int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal); +int cpsw_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); +int cpsw_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack); int cpsw_get_sset_count(struct net_device *ndev, int sset); void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data); void cpsw_get_ethtool_stats(struct net_device *ndev, diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index c674e34b6839..e8291d848839 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -383,12 +383,16 @@ static void emac_get_drvinfo(struct net_device *ndev, * emac_get_coalesce - Get interrupt coalesce settings for 
this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Fetch the current interrupt coalesce settings * */ static int emac_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct emac_priv *priv = netdev_priv(ndev); @@ -401,12 +405,16 @@ static int emac_get_coalesce(struct net_device *ndev, * emac_set_coalesce - Set interrupt coalesce settings for this device * @ndev : The DaVinci EMAC network adapter * @coal : ethtool coalesce settings structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * Set interrupt coalesce parameters * */ static int emac_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *coal) + struct ethtool_coalesce *coal, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct emac_priv *priv = netdev_priv(ndev); u32 int_ctrl, num_interrupts = 0; @@ -943,7 +951,7 @@ static int emac_dev_xmit(struct sk_buff *skb, struct net_device *ndev) goto fail_tx; } - ret_code = skb_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE); + ret_code = skb_put_padto(skb, EMAC_DEF_MIN_ETHPKTSIZE); if (unlikely(ret_code < 0)) { if (netif_msg_tx_err(priv) && net_ratelimit()) dev_err(emac_dev, "DaVinci EMAC: packet pad failed"); @@ -1462,7 +1470,7 @@ static int emac_dev_open(struct net_device *ndev) struct ethtool_coalesce coal; coal.rx_coalesce_usecs = (priv->coal_intvl << 4); - emac_set_coalesce(ndev, &coal); + emac_set_coalesce(ndev, &coal, NULL, NULL); } cpdma_ctlr_start(priv->dma); @@ -1670,7 +1678,7 @@ static const struct net_device_ops emac_netdev_ops = { .ndo_start_xmit = emac_dev_xmit, .ndo_set_rx_mode = emac_dev_mcast_set, .ndo_set_mac_address = emac_dev_setmac_addr, - .ndo_do_ioctl = emac_devioctl, + .ndo_eth_ioctl = emac_devioctl, .ndo_tx_timeout = emac_dev_tx_timeout, .ndo_get_stats = emac_dev_getnetstats, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c index 97942b0e3897..eda2961c0fe2 100644 --- a/drivers/net/ethernet/ti/netcp_core.c +++ b/drivers/net/ethernet/ti/netcp_core.c @@ -1944,7 +1944,7 @@ static const struct net_device_ops netcp_netdev_ops = { .ndo_stop = netcp_ndo_stop, .ndo_start_xmit = netcp_ndo_start_xmit, .ndo_set_rx_mode = netcp_set_rx_mode, - .ndo_do_ioctl = netcp_ndo_ioctl, + .ndo_eth_ioctl = netcp_ndo_ioctl, .ndo_get_stats64 = netcp_get_stats, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index e0cb713193ea..77c448ad67ce 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -749,7 +749,7 @@ static const struct net_device_ops tlan_netdev_ops = { .ndo_tx_timeout = tlan_tx_timeout, .ndo_get_stats = tlan_get_stats, .ndo_set_rx_mode = tlan_set_multicast_list, - .ndo_do_ioctl = tlan_ioctl, + .ndo_eth_ioctl = tlan_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c index 226a76633e65..66d4e024d11e 100644 --- a/drivers/net/ethernet/toshiba/spider_net.c +++ b/drivers/net/ethernet/toshiba/spider_net.c @@ -354,9 +354,10 @@ 
spider_net_free_rx_chain_contents(struct spider_net_card *card) descr = card->rx_chain.head; do { if (descr->skb) { - pci_unmap_single(card->pdev, descr->hwdescr->buf_addr, + dma_unmap_single(&card->pdev->dev, + descr->hwdescr->buf_addr, SPIDER_NET_MAX_FRAME, - PCI_DMA_BIDIRECTIONAL); + DMA_BIDIRECTIONAL); dev_kfree_skb(descr->skb); descr->skb = NULL; } @@ -411,9 +412,9 @@ spider_net_prepare_rx_descr(struct spider_net_card *card, if (offset) skb_reserve(descr->skb, SPIDER_NET_RXBUF_ALIGN - offset); /* iommu-map the skb */ - buf = pci_map_single(card->pdev, descr->skb->data, - SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); - if (pci_dma_mapping_error(card->pdev, buf)) { + buf = dma_map_single(&card->pdev->dev, descr->skb->data, + SPIDER_NET_MAX_FRAME, DMA_FROM_DEVICE); + if (dma_mapping_error(&card->pdev->dev, buf)) { dev_kfree_skb_any(descr->skb); descr->skb = NULL; if (netif_msg_rx_err(card) && net_ratelimit()) @@ -653,8 +654,9 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, dma_addr_t buf; unsigned long flags; - buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); - if (pci_dma_mapping_error(card->pdev, buf)) { + buf = dma_map_single(&card->pdev->dev, skb->data, skb->len, + DMA_TO_DEVICE); + if (dma_mapping_error(&card->pdev->dev, buf)) { if (netif_msg_tx_err(card) && net_ratelimit()) dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). " "Dropping packet\n", skb->data, skb->len); @@ -666,7 +668,8 @@ spider_net_prepare_tx_descr(struct spider_net_card *card, descr = card->tx_chain.head; if (descr->next == chain->tail->prev) { spin_unlock_irqrestore(&chain->lock, flags); - pci_unmap_single(card->pdev, buf, skb->len, PCI_DMA_TODEVICE); + dma_unmap_single(&card->pdev->dev, buf, skb->len, + DMA_TO_DEVICE); return -ENOMEM; } hwdescr = descr->hwdescr; @@ -822,8 +825,8 @@ spider_net_release_tx_chain(struct spider_net_card *card, int brutal) /* unmap the skb */ if (skb) { - pci_unmap_single(card->pdev, buf_addr, skb->len, - PCI_DMA_TODEVICE); + dma_unmap_single(&card->pdev->dev, buf_addr, skb->len, + DMA_TO_DEVICE); dev_consume_skb_any(skb); } } @@ -1165,8 +1168,8 @@ spider_net_decode_one_descr(struct spider_net_card *card) /* unmap descriptor */ hw_buf_addr = hwdescr->buf_addr; hwdescr->buf_addr = 0xffffffff; - pci_unmap_single(card->pdev, hw_buf_addr, - SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE); + dma_unmap_single(&card->pdev->dev, hw_buf_addr, SPIDER_NET_MAX_FRAME, + DMA_FROM_DEVICE); if ( (status == SPIDER_NET_DESCR_RESPONSE_ERROR) || (status == SPIDER_NET_DESCR_PROTECTION_ERROR) || @@ -2214,7 +2217,7 @@ static const struct net_device_ops spider_net_ops = { .ndo_start_xmit = spider_net_xmit, .ndo_set_rx_mode = spider_net_set_multi, .ndo_set_mac_address = spider_net_set_mac, - .ndo_do_ioctl = spider_net_do_ioctl, + .ndo_eth_ioctl = spider_net_do_ioctl, .ndo_tx_timeout = spider_net_tx_timeout, .ndo_validate_addr = eth_validate_addr, /* HW VLAN */ diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index fedb2bf69261..52245ac60fc7 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -750,7 +750,7 @@ static const struct net_device_ops tc35815_netdev_ops = { .ndo_get_stats = tc35815_get_stats, .ndo_set_rx_mode = tc35815_set_multicast_list, .ndo_tx_timeout = tc35815_tx_timeout, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER diff 
--git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index c62f474b6d08..cf0917b29e30 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1538,7 +1538,7 @@ static const struct net_device_ops tsi108_netdev_ops = { .ndo_start_xmit = tsi108_send_packet, .ndo_set_rx_mode = tsi108_set_rx_mode, .ndo_get_stats = tsi108_get_stats, - .ndo_do_ioctl = tsi108_do_ioctl, + .ndo_eth_ioctl = tsi108_do_ioctl, .ndo_set_mac_address = tsi108_set_mac, .ndo_validate_addr = eth_validate_addr, }; diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c index 73ca597ebd1b..3b73a9c55a5a 100644 --- a/drivers/net/ethernet/via/via-rhine.c +++ b/drivers/net/ethernet/via/via-rhine.c @@ -884,7 +884,7 @@ static const struct net_device_ops rhine_netdev_ops = { .ndo_set_rx_mode = rhine_set_rx_mode, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, - .ndo_do_ioctl = netdev_ioctl, + .ndo_eth_ioctl = netdev_ioctl, .ndo_tx_timeout = rhine_tx_timeout, .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid, @@ -1113,13 +1113,12 @@ err_out: static int rhine_init_one_platform(struct platform_device *pdev) { - const struct of_device_id *match; const u32 *quirks; int irq; void __iomem *ioaddr; - match = of_match_device(rhine_of_tbl, &pdev->dev); - if (!match) + quirks = of_device_get_match_data(&pdev->dev); + if (!quirks) return -EINVAL; ioaddr = devm_platform_ioremap_resource(pdev, 0); @@ -1130,10 +1129,6 @@ static int rhine_init_one_platform(struct platform_device *pdev) if (!irq) return -EINVAL; - quirks = match->data; - if (!quirks) - return -EINVAL; - return rhine_init_one_common(&pdev->dev, *quirks, (long)ioaddr, ioaddr, irq); } diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index 88426b5e410b..4b9c30f735b5 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2637,7 +2637,7 @@ static const struct net_device_ops velocity_netdev_ops = { .ndo_set_mac_address = eth_mac_addr, .ndo_set_rx_mode = velocity_set_multi, .ndo_change_mtu = velocity_change_mtu, - .ndo_do_ioctl = velocity_ioctl, + .ndo_eth_ioctl = velocity_ioctl, .ndo_vlan_rx_add_vid = velocity_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = velocity_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -2943,14 +2943,12 @@ static void velocity_pci_remove(struct pci_dev *pdev) static int velocity_platform_probe(struct platform_device *pdev) { - const struct of_device_id *of_id; const struct velocity_info_tbl *info; int irq; - of_id = of_match_device(velocity_of_ids, &pdev->dev); - if (!of_id) + info = of_device_get_match_data(&pdev->dev); + if (!info) return -EINVAL; - info = of_id->data; irq = irq_of_parse_and_map(pdev->dev.of_node, 0); if (!irq) @@ -3520,7 +3518,9 @@ static void set_pending_timer_val(int *val, u32 us) static int velocity_get_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct velocity_info *vptr = netdev_priv(dev); @@ -3534,7 +3534,9 @@ static int velocity_get_coalesce(struct net_device *dev, } static int velocity_set_coalesce(struct net_device *dev, - struct ethtool_coalesce *ecmd) + struct ethtool_coalesce *ecmd, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct velocity_info *vptr = netdev_priv(dev); int 
max_us = 0x3f * 64; diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c index 811815f8cd3b..f974e70a82e8 100644 --- a/drivers/net/ethernet/wiznet/w5100.c +++ b/drivers/net/ethernet/wiznet/w5100.c @@ -1047,6 +1047,8 @@ static int w5100_mmio_probe(struct platform_device *pdev) mac_addr = data->mac_addr; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) + return -EINVAL; if (resource_size(mem) < W5100_BUS_DIRECT_SIZE) ops = &w5100_mmio_indirect_ops; else diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 60a4f79b8fa1..463094ced104 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1237,7 +1237,7 @@ static const struct net_device_ops temac_netdev_ops = { .ndo_set_rx_mode = temac_set_multicast_list, .ndo_set_mac_address = temac_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = phy_do_ioctl_running, + .ndo_eth_ioctl = phy_do_ioctl_running, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = temac_poll_controller, #endif @@ -1310,8 +1310,11 @@ static int ll_temac_ethtools_set_ringparam(struct net_device *ndev, return 0; } -static int ll_temac_ethtools_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ec) +static int +ll_temac_ethtools_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); @@ -1322,8 +1325,11 @@ static int ll_temac_ethtools_get_coalesce(struct net_device *ndev, return 0; } -static int ll_temac_ethtools_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ec) +static int +ll_temac_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ec, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct temac_local *lp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index 13cd799541aa..871b5ec3183d 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1227,7 +1227,7 @@ static const struct net_device_ops axienet_netdev_ops = { .ndo_change_mtu = axienet_change_mtu, .ndo_set_mac_address = netdev_set_mac_address, .ndo_validate_addr = eth_validate_addr, - .ndo_do_ioctl = axienet_ioctl, + .ndo_eth_ioctl = axienet_ioctl, .ndo_set_rx_mode = axienet_set_multicast_list, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = axienet_poll_controller, @@ -1400,6 +1400,8 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev, * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * This implements ethtool command for getting the DMA interrupt coalescing * count on Tx and Rx paths. 
Issue "ethtool -c ethX" under linux prompt to @@ -1407,8 +1409,11 @@ axienet_ethtools_set_pauseparam(struct net_device *ndev, * * Return: 0 always */ -static int axienet_ethtools_get_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +axienet_ethtools_get_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { u32 regval = 0; struct axienet_local *lp = netdev_priv(ndev); @@ -1425,6 +1430,8 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev, * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count. * @ndev: Pointer to net_device structure * @ecoalesce: Pointer to ethtool_coalesce structure + * @kernel_coal: ethtool CQE mode setting structure + * @extack: extack for reporting error messages * * This implements ethtool command for setting the DMA interrupt coalescing * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux @@ -1432,8 +1439,11 @@ static int axienet_ethtools_get_coalesce(struct net_device *ndev, * * Return: 0, on success, Non-zero error value on failure. */ -static int axienet_ethtools_set_coalesce(struct net_device *ndev, - struct ethtool_coalesce *ecoalesce) +static int +axienet_ethtools_set_coalesce(struct net_device *ndev, + struct ethtool_coalesce *ecoalesce, + struct kernel_ethtool_coalesce *kernel_coal, + struct netlink_ext_ack *extack) { struct axienet_local *lp = netdev_priv(ndev); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index b06377fe7293..b780aad3550a 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1263,7 +1263,7 @@ static const struct net_device_ops xemaclite_netdev_ops = { .ndo_start_xmit = xemaclite_send, .ndo_set_mac_address = xemaclite_set_mac_address, .ndo_tx_timeout = xemaclite_tx_timeout, - .ndo_do_ioctl = xemaclite_ioctl, + .ndo_eth_ioctl = xemaclite_ioctl, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = xemaclite_poll_controller, #endif diff --git a/drivers/net/ethernet/xircom/xirc2ps_cs.c b/drivers/net/ethernet/xircom/xirc2ps_cs.c index 4f6db6f5c272..ae611e46da6a 100644 --- a/drivers/net/ethernet/xircom/xirc2ps_cs.c +++ b/drivers/net/ethernet/xircom/xirc2ps_cs.c @@ -464,7 +464,7 @@ static const struct net_device_ops netdev_ops = { .ndo_start_xmit = do_start_xmit, .ndo_tx_timeout = xirc_tx_timeout, .ndo_set_config = do_config, - .ndo_do_ioctl = do_ioctl, + .ndo_eth_ioctl = do_ioctl, .ndo_set_rx_mode = set_multicast_list, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, diff --git a/drivers/net/ethernet/xscale/Kconfig b/drivers/net/ethernet/xscale/Kconfig index 468ffe3d1707..0e878fa6e322 100644 --- a/drivers/net/ethernet/xscale/Kconfig +++ b/drivers/net/ethernet/xscale/Kconfig @@ -29,9 +29,9 @@ config IXP4XX_ETH on IXP4xx processor. config PTP_1588_CLOCK_IXP46X - tristate "Intel IXP46x as PTP clock" + bool "Intel IXP46x as PTP clock" depends on IXP4XX_ETH - depends on PTP_1588_CLOCK + depends on PTP_1588_CLOCK=y || PTP_1588_CLOCK=IXP4XX_ETH default y help This driver adds support for using the IXP46X as a PTP diff --git a/drivers/net/ethernet/xscale/Makefile b/drivers/net/ethernet/xscale/Makefile index 607f91b1e878..e935f2a2979f 100644 --- a/drivers/net/ethernet/xscale/Makefile +++ b/drivers/net/ethernet/xscale/Makefile @@ -3,5 +3,9 @@ # Makefile for the Intel XScale IXP device drivers. 
# +# Keep this link order to avoid deferred probing +ifdef CONFIG_PTP_1588_CLOCK_IXP46X +obj-$(CONFIG_IXP4XX_ETH) += ptp_ixp46x.o +endif + obj-$(CONFIG_IXP4XX_ETH) += ixp4xx_eth.o -obj-$(CONFIG_PTP_1588_CLOCK_IXP46X) += ptp_ixp46x.o diff --git a/drivers/net/ethernet/xscale/ixp46x_ts.h b/drivers/net/ethernet/xscale/ixp46x_ts.h index d792130e27b0..ee9b93ded20a 100644 --- a/drivers/net/ethernet/xscale/ixp46x_ts.h +++ b/drivers/net/ethernet/xscale/ixp46x_ts.h @@ -62,7 +62,16 @@ struct ixp46x_ts_regs { #define TX_SNAPSHOT_LOCKED (1<<0) #define RX_SNAPSHOT_LOCKED (1<<1) -/* The ptp_ixp46x module will set this variable */ -extern int ixp46x_phc_index; +#if IS_ENABLED(CONFIG_PTP_1588_CLOCK_IXP46X) +int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index); +#else +static inline int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) +{ + *regs = NULL; + *phc_index = -1; + + return -ENODEV; +} +#endif #endif diff --git a/drivers/net/ethernet/xscale/ixp4xx_eth.c b/drivers/net/ethernet/xscale/ixp4xx_eth.c index 7ae754eadf22..931494cc1c39 100644 --- a/drivers/net/ethernet/xscale/ixp4xx_eth.c +++ b/drivers/net/ethernet/xscale/ixp4xx_eth.c @@ -37,7 +37,6 @@ #include <linux/module.h> #include <linux/soc/ixp4xx/npe.h> #include <linux/soc/ixp4xx/qmgr.h> -#include <mach/hardware.h> #include <linux/soc/ixp4xx/cpu.h> #include "ixp46x_ts.h" @@ -169,13 +168,15 @@ struct eth_regs { struct port { struct eth_regs __iomem *regs; + struct ixp46x_ts_regs __iomem *timesync_regs; + int phc_index; struct npe *npe; struct net_device *netdev; struct napi_struct napi; struct eth_plat_info *plat; buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS]; struct desc *desc_tab; /* coherent */ - u32 desc_tab_phys; + dma_addr_t desc_tab_phys; int id; /* logical port ID */ int speed, duplex; u8 firmware[4]; @@ -295,7 +296,7 @@ static void ixp_rx_timestamp(struct port *port, struct sk_buff *skb) ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; val = __raw_readl(&regs->channel[ch].ch_event); @@ -340,7 +341,7 @@ static void ixp_tx_timestamp(struct port *port, struct sk_buff *skb) ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; /* * This really stinks, but we have to poll for the Tx time stamp.
@@ -375,6 +376,7 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) struct hwtstamp_config cfg; struct ixp46x_ts_regs *regs; struct port *port = netdev_priv(netdev); + int ret; int ch; if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg))) @@ -383,8 +385,12 @@ static int hwtstamp_set(struct net_device *netdev, struct ifreq *ifr) if (cfg.flags) /* reserved for future extensions */ return -EINVAL; + ret = ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); + if (ret) + return ret; + ch = PORT2CHANNEL(port); - regs = (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + regs = port->timesync_regs; if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON) return -ERANGE; @@ -850,14 +856,14 @@ static int eth_xmit(struct sk_buff *skb, struct net_device *dev) bytes = len; mem = skb->data; #else - offset = (int)skb->data & 3; /* keep 32-bit alignment */ + offset = (uintptr_t)skb->data & 3; /* keep 32-bit alignment */ bytes = ALIGN(offset + len, 4); if (!(mem = kmalloc(bytes, GFP_ATOMIC))) { dev_kfree_skb(skb); dev->stats.tx_dropped++; return NETDEV_TX_OK; } - memcpy_swab32(mem, (u32 *)((int)skb->data & ~3), bytes / 4); + memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4); #endif phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE); @@ -988,25 +994,27 @@ static void ixp4xx_get_drvinfo(struct net_device *dev, strlcpy(info->bus_info, "internal", sizeof(info->bus_info)); } -int ixp46x_phc_index = -1; -EXPORT_SYMBOL_GPL(ixp46x_phc_index); - static int ixp4xx_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { - if (!cpu_is_ixp46x()) { + struct port *port = netdev_priv(dev); + + if (port->phc_index < 0) + ixp46x_ptp_find(&port->timesync_regs, &port->phc_index); + + info->phc_index = port->phc_index; + + if (info->phc_index < 0) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE; - info->phc_index = -1; return 0; } info->so_timestamping = SOF_TIMESTAMPING_TX_HARDWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; - info->phc_index = ixp46x_phc_index; info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); @@ -1357,7 +1365,7 @@ static const struct net_device_ops ixp4xx_netdev_ops = { .ndo_stop = eth_close, .ndo_start_xmit = eth_xmit, .ndo_set_rx_mode = eth_set_mcast_list, - .ndo_do_ioctl = eth_ioctl, + .ndo_eth_ioctl = eth_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; @@ -1481,6 +1489,7 @@ static int ixp4xx_eth_probe(struct platform_device *pdev) port = netdev_priv(ndev); port->netdev = ndev; port->id = plat->npe; + port->phc_index = -1; /* Get the port resource and remap */ port->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL); diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index a6fb88fd42f7..ecece21315c3 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -5,14 +5,16 @@ * Copyright (C) 2010 OMICRON electronics GmbH */ #include <linux/device.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> #include <linux/err.h> -#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/ptp_clock_kernel.h> +#include <linux/platform_device.h> #include <linux/soc/ixp4xx/cpu.h> #include <linux/module.h> #include <mach/ixp4xx-regs.h> @@ -21,10 +23,6 @@ #define DRIVER "ptp_ixp46x" 
#define N_EXT_TS 2 -#define MASTER_GPIO 8 -#define MASTER_IRQ 25 -#define SLAVE_GPIO 7 -#define SLAVE_IRQ 24 struct ixp_clock { struct ixp46x_ts_regs *regs; @@ -32,9 +30,11 @@ struct ixp_clock { struct ptp_clock_info caps; int exts0_enabled; int exts1_enabled; + int slave_irq; + int master_irq; }; -DEFINE_SPINLOCK(register_lock); +static DEFINE_SPINLOCK(register_lock); /* * Register access functions @@ -243,53 +243,38 @@ static const struct ptp_clock_info ptp_ixp_caps = { static struct ixp_clock ixp_clock; -static int setup_interrupt(int gpio) +int ixp46x_ptp_find(struct ixp46x_ts_regs *__iomem *regs, int *phc_index) { - int irq; - int err; - - err = gpio_request(gpio, "ixp4-ptp"); - if (err) - return err; - - err = gpio_direction_input(gpio); - if (err) - return err; - - irq = gpio_to_irq(gpio); - if (irq < 0) - return irq; + *regs = ixp_clock.regs; + *phc_index = ptp_clock_index(ixp_clock.ptp_clock); - err = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); - if (err) { - pr_err("cannot set trigger type for irq %d\n", irq); - return err; - } - - err = request_irq(irq, isr, 0, DRIVER, &ixp_clock); - if (err) { - pr_err("request_irq failed for irq %d\n", irq); - return err; - } + if (!ixp_clock.ptp_clock) + return -EPROBE_DEFER; - return irq; + return 0; } +EXPORT_SYMBOL_GPL(ixp46x_ptp_find); -static void __exit ptp_ixp_exit(void) +/* Called from the registered devm action */ +static void ptp_ixp_unregister_action(void *d) { - free_irq(MASTER_IRQ, &ixp_clock); - free_irq(SLAVE_IRQ, &ixp_clock); - ixp46x_phc_index = -1; - ptp_clock_unregister(ixp_clock.ptp_clock); + struct ptp_clock *ptp_clock = d; + + ptp_clock_unregister(ptp_clock); + ixp_clock.ptp_clock = NULL; } -static int __init ptp_ixp_init(void) +static int ptp_ixp_probe(struct platform_device *pdev) { - if (!cpu_is_ixp46x()) - return -ENODEV; + struct device *dev = &pdev->dev; + int ret; - ixp_clock.regs = - (struct ixp46x_ts_regs __iomem *) IXP4XX_TIMESYNC_BASE_VIRT; + ixp_clock.regs = devm_platform_ioremap_resource(pdev, 0); + ixp_clock.master_irq = platform_get_irq(pdev, 0); + ixp_clock.slave_irq = platform_get_irq(pdev, 1); + if (IS_ERR(ixp_clock.regs) || + !ixp_clock.master_irq || !ixp_clock.slave_irq) + return -ENXIO; ixp_clock.caps = ptp_ixp_caps; @@ -298,32 +283,51 @@ static int __init ptp_ixp_init(void) if (IS_ERR(ixp_clock.ptp_clock)) return PTR_ERR(ixp_clock.ptp_clock); - ixp46x_phc_index = ptp_clock_index(ixp_clock.ptp_clock); + ret = devm_add_action_or_reset(dev, ptp_ixp_unregister_action, + ixp_clock.ptp_clock); + if (ret) { + dev_err(dev, "failed to install clock removal handler\n"); + return ret; + } __raw_writel(DEFAULT_ADDEND, &ixp_clock.regs->addend); __raw_writel(1, &ixp_clock.regs->trgt_lo); __raw_writel(0, &ixp_clock.regs->trgt_hi); __raw_writel(TTIPEND, &ixp_clock.regs->event); - if (MASTER_IRQ != setup_interrupt(MASTER_GPIO)) { - pr_err("failed to setup gpio %d as irq\n", MASTER_GPIO); - goto no_master; - } - if (SLAVE_IRQ != setup_interrupt(SLAVE_GPIO)) { - pr_err("failed to setup gpio %d as irq\n", SLAVE_GPIO); - goto no_slave; - } + ret = devm_request_irq(dev, ixp_clock.master_irq, isr, + 0, DRIVER, &ixp_clock); + if (ret) + return dev_err_probe(dev, ret, + "request_irq failed for irq %d\n", + ixp_clock.master_irq); + + ret = devm_request_irq(dev, ixp_clock.slave_irq, isr, + 0, DRIVER, &ixp_clock); + if (ret) + return dev_err_probe(dev, ret, + "request_irq failed for irq %d\n", + ixp_clock.slave_irq); return 0; -no_slave: - free_irq(MASTER_IRQ, &ixp_clock); -no_master: - 
ptp_clock_unregister(ixp_clock.ptp_clock); - return -ENODEV; } -module_init(ptp_ixp_init); -module_exit(ptp_ixp_exit); +static const struct of_device_id ptp_ixp_match[] = { + { + .compatible = "intel,ixp46x-ptp-timer", + }, + { }, +}; + +static struct platform_driver ptp_ixp_driver = { + .driver = { + .name = "ptp-ixp46x", + .of_match_table = ptp_ixp_match, + .suppress_bind_attrs = true, + }, + .probe = ptp_ixp_probe, +}; +module_platform_driver(ptp_ixp_driver); MODULE_AUTHOR("Richard Cochran <richardcochran@gmail.com>"); MODULE_DESCRIPTION("PTP clock using the IXP46X timer");
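
The repeated .ndo_do_ioctl to .ndo_eth_ioctl renames above reflect the split of the ioctl path: MII and hardware-timestamping requests now arrive through ndo_eth_ioctl, while driver-private SIOCDEVPRIVATE commands move to ndo_siocdevprivate with the user pointer passed explicitly (as in the tehuti hunk). Below is a minimal sketch of both hooks for a hypothetical phylib-based driver; the foo_* names are illustrative and not taken from any of the drivers above.

#include <linux/netdevice.h>
#include <linux/phy.h>
#include <linux/uaccess.h>

/* Hypothetical driver entry points, declared only so the ops table is complete. */
static int foo_open(struct net_device *ndev);
static int foo_stop(struct net_device *ndev);
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *ndev);

/* Driver-private ioctls (SIOCDEVPRIVATE .. SIOCDEVPRIVATE + 15) now receive
 * the user pointer as a separate argument instead of digging it out of ifr. */
static int foo_siocdevprivate(struct net_device *ndev, struct ifreq *ifr,
                              void __user *data, int cmd)
{
        u32 args[3];

        if (copy_from_user(args, data, sizeof(args)))
                return -EFAULT;
        /* ... act on args[] here ... */
        return copy_to_user(data, args, sizeof(args)) ? -EFAULT : 0;
}

static const struct net_device_ops foo_netdev_ops = {
        .ndo_open               = foo_open,
        .ndo_stop               = foo_stop,
        .ndo_start_xmit         = foo_start_xmit,
        /* SIOC[GS]MII* and SIOC[GS]HWTSTAMP requests land here: */
        .ndo_eth_ioctl          = phy_do_ioctl_running,
        /* Legacy private ioctls land here: */
        .ndo_siocdevprivate     = foo_siocdevprivate,
};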
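
The get_coalesce/set_coalesce signature change that touches stmmac, dwc-xlgmac, tehuti, cpsw, davinci_emac, via-velocity, ll_temac and axienet above adds two parameters: kernel_ethtool_coalesce for CQE-mode settings and a netlink extack for error reporting. A sketch of the new callback shape, assuming a hypothetical foo_priv with a single cached RX interval (none of these names come from the patch):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/netlink.h>

struct foo_priv {
        u32 coal_usecs;         /* cached RX coalescing interval */
};

/* kernel_coal carries the CQE-mode settings; extack lets the driver return
 * a precise error string over netlink instead of a bare errno. */
static int foo_get_coalesce(struct net_device *ndev,
                            struct ethtool_coalesce *ec,
                            struct kernel_ethtool_coalesce *kernel_coal,
                            struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(ndev);

        ec->rx_coalesce_usecs = priv->coal_usecs;
        return 0;
}

static int foo_set_coalesce(struct net_device *ndev,
                            struct ethtool_coalesce *ec,
                            struct kernel_ethtool_coalesce *kernel_coal,
                            struct netlink_ext_ack *extack)
{
        struct foo_priv *priv = netdev_priv(ndev);

        if (!ec->rx_coalesce_usecs) {
                NL_SET_ERR_MSG(extack, "rx-usecs must be non-zero");
                return -EINVAL;
        }

        priv->coal_usecs = ec->rx_coalesce_usecs;
        return 0;
}

static const struct ethtool_ops foo_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS,
        .get_coalesce   = foo_get_coalesce,
        .set_coalesce   = foo_set_coalesce,
};

In-kernel callers that build an ethtool_coalesce by hand, such as cpsw_ndo_open() and emac_dev_open() in the hunks above, simply pass NULL for the two new arguments.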
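
The new stmmac per-queue counters follow the usual ethtool contract: get_sset_count(), get_strings() and get_ethtool_stats() must enumerate the queues in exactly the same order, otherwise "ethtool -S" prints values against the wrong labels. A condensed sketch of that pattern for a hypothetical device with per-queue TX/RX packet counters (foo_priv and its fields are illustrative, not the stmmac layout):

#include <linux/ethtool.h>
#include <linux/netdevice.h>

#define FOO_MAX_QUEUES  8

struct foo_qstats {
        u64 tx_pkt_n;
        u64 rx_pkt_n;
};

struct foo_priv {
        u32 num_queues;
        struct foo_qstats qstats[FOO_MAX_QUEUES];
};

/* Two counters per queue; all three callbacks must agree on this layout. */
static int foo_get_sset_count(struct net_device *ndev, int sset)
{
        struct foo_priv *priv = netdev_priv(ndev);

        if (sset != ETH_SS_STATS)
                return -EOPNOTSUPP;
        return 2 * priv->num_queues;
}

static void foo_get_strings(struct net_device *ndev, u32 sset, u8 *data)
{
        struct foo_priv *priv = netdev_priv(ndev);
        u32 q;

        if (sset != ETH_SS_STATS)
                return;

        for (q = 0; q < priv->num_queues; q++) {
                ethtool_sprintf(&data, "q%u_tx_pkt_n", q);
                ethtool_sprintf(&data, "q%u_rx_pkt_n", q);
        }
}

static void foo_get_ethtool_stats(struct net_device *ndev,
                                  struct ethtool_stats *stats, u64 *data)
{
        struct foo_priv *priv = netdev_priv(ndev);
        u32 q;

        for (q = 0; q < priv->num_queues; q++) {
                *data++ = priv->qstats[q].tx_pkt_n;     /* matches "q%u_tx_pkt_n" */
                *data++ = priv->qstats[q].rx_pkt_n;     /* matches "q%u_rx_pkt_n" */
        }
}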
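
The niu, sunhme and spider_net hunks retire the deprecated pci_* DMA wrappers in favour of the generic DMA API. A minimal sketch of the probe-time mask negotiation and a single-buffer streaming mapping, assuming a hypothetical PCI driver (the 44-bit mask mirrors niu; the foo_* helpers are illustrative):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Negotiate the DMA mask once at probe time; dma_set_mask_and_coherent()
 * sets both the streaming and the coherent mask in one call, replacing the
 * separate pci_set_dma_mask()/pci_set_consistent_dma_mask() pair. */
static int foo_setup_dma(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
        if (err)
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
        return err;
}

/* Streaming mapping for one RX buffer; mapping errors must be checked. */
static dma_addr_t foo_map_rx_buf(struct pci_dev *pdev, void *buf, size_t len)
{
        dma_addr_t addr;

        addr = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
        if (dma_mapping_error(&pdev->dev, addr))
                return DMA_MAPPING_ERROR;
        return addr;
}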
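
The via-rhine and via-velocity hunks replace the of_match_device() boilerplate with of_device_get_match_data(), which returns the matching entry's .data pointer (or NULL) in a single call. A sketch, assuming a hypothetical platform driver keyed on a per-SoC quirks structure (the compatible string and foo_* names are made up):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_quirks {
        u32 flags;
};

static const struct foo_quirks foo_v1_quirks = { .flags = 0x1 };

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo-v1", .data = &foo_v1_quirks },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
        const struct foo_quirks *quirks;

        /* Returns the matching entry's .data pointer, or NULL if none matched. */
        quirks = of_device_get_match_data(&pdev->dev);
        if (!quirks)
                return -EINVAL;

        dev_info(&pdev->dev, "quirk flags: %#x\n", quirks->flags);
        return 0;
}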
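
In the am65-cpsw and cpsw hunks, the NETDEV_CHANGEUPPER handler now calls switchdev_bridge_port_offload()/switchdev_bridge_port_unoffload() so the bridge layer learns that the port is offloaded, and the netlink extack from the notifier is threaded through for error reporting. A trimmed sketch of that notifier flow (the foo_* helpers are illustrative, not the cpsw code):

#include <linux/netdevice.h>
#include <linux/notifier.h>
#include <net/switchdev.h>

/* In a real driver this checks that ndev belongs to this switch. */
static bool foo_port_dev_check(const struct net_device *ndev)
{
        return true;
}

static int foo_port_link(struct net_device *ndev, struct net_device *br,
                         struct netlink_ext_ack *extack)
{
        /* Tell the bridge layer that this port is offloaded in hardware. */
        return switchdev_bridge_port_offload(ndev, ndev, NULL, NULL, NULL,
                                             false, extack);
}

static void foo_port_unlink(struct net_device *ndev)
{
        switchdev_bridge_port_unoffload(ndev, NULL, NULL, NULL);
}

static int foo_netdevice_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
        struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr);
        struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
        struct netdev_notifier_changeupper_info *info = ptr;
        int err = 0;

        if (event != NETDEV_CHANGEUPPER || !foo_port_dev_check(ndev))
                return NOTIFY_DONE;

        if (netif_is_bridge_master(info->upper_dev)) {
                if (info->linking)
                        err = foo_port_link(ndev, info->upper_dev, extack);
                else
                        foo_port_unlink(ndev);
        }

        return notifier_from_errno(err);
}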
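
The ptp_ixp46x rewrite is a conversion from hard-coded module init/exit with fixed GPIO and IRQ numbers to a device-tree-probed platform driver whose teardown is tied to the device via devm_add_action_or_reset(). A condensed sketch of the same pattern for a hypothetical single-IRQ device (the foo_* names and the compatible string are made up):

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

struct foo_clock {
        void __iomem *regs;
        int irq;
};

static irqreturn_t foo_isr(int irq, void *priv)
{
        return IRQ_HANDLED;
}

/* Runs automatically on probe failure and again on device removal. */
static void foo_teardown(void *data)
{
        struct foo_clock *clk = data;

        clk->regs = NULL;       /* undo whatever probe set up beyond devm */
}

static int foo_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct foo_clock *clk;
        int ret;

        clk = devm_kzalloc(dev, sizeof(*clk), GFP_KERNEL);
        if (!clk)
                return -ENOMEM;

        clk->regs = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(clk->regs))
                return PTR_ERR(clk->regs);

        clk->irq = platform_get_irq(pdev, 0);
        if (clk->irq < 0)
                return clk->irq;

        ret = devm_add_action_or_reset(dev, foo_teardown, clk);
        if (ret)
                return ret;

        return devm_request_irq(dev, clk->irq, foo_isr, 0, "foo-clock", clk);
}

static const struct of_device_id foo_of_match[] = {
        { .compatible = "vendor,foo-clock" },
        { }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static struct platform_driver foo_driver = {
        .driver = {
                .name = "foo-clock",
                .of_match_table = foo_of_match,
        },
        .probe = foo_probe,
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");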