Diffstat (limited to 'drivers/net')
729 files changed, 45340 insertions, 17632 deletions
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index d03775100f7d..6371958dd170 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -397,10 +397,10 @@ config NET_SB1000
 	  At present this driver only compiles as a module, so say M here if
 	  you have this card. The module will be called sb1000. Then read
-	  <file:Documentation/networking/README.sb1000> for information on how
-	  to use this module, as it needs special ppp scripts for establishing
-	  a connection. Further documentation and the necessary scripts can be
-	  found at:
+	  <file:Documentation/networking/device_drivers/sb1000.txt> for
+	  information on how to use this module, as it needs special ppp
+	  scripts for establishing a connection. Further documentation
+	  and the necessary scripts can be found at:
 
 	  <http://www.jacksonville.net/~fventuri/>
 	  <http://home.adelphia.net/~siglercm/sb1000.html>
diff --git a/drivers/net/appletalk/cops.c b/drivers/net/appletalk/cops.c
index bb49f6e40a19..f90bb723985f 100644
--- a/drivers/net/appletalk/cops.c
+++ b/drivers/net/appletalk/cops.c
@@ -777,10 +777,7 @@ static void cops_rx(struct net_device *dev)
 	}
 
 	/* Get response length. */
-	if(lp->board==DAYNA)
-		pkt_len = inb(ioaddr) & 0xFF;
-	else
-		pkt_len = inb(ioaddr) & 0x00FF;
+	pkt_len = inb(ioaddr);
 	pkt_len |= (inb(ioaddr) << 8);
 	/* Input IO code. */
 	rsp_type=inb(ioaddr);
@@ -892,10 +889,7 @@ static netdev_tx_t cops_send_packet(struct sk_buff *skb,
 
 	/* Output IO length. */
 	outb(skb->len, ioaddr);
-	if(lp->board == DAYNA)
-		outb(skb->len >> 8, ioaddr);
-	else
-		outb((skb->len >> 8)&0x0FF, ioaddr);
+	outb(skb->len >> 8, ioaddr);
 
 	/* Output IO code. */
 	outb(LAP_WRITE, ioaddr);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 93dfcef8afc4..7c46d9f4fefd 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -1220,7 +1220,7 @@ static void ad_churn_machine(struct port *port)
 		port->sm_churn_partner_state = AD_CHURN_MONITOR;
 		port->sm_churn_actor_timer_counter =
 			__ad_timer_to_ticks(AD_ACTOR_CHURN_TIMER, 0);
-		port->sm_churn_partner_timer_counter = 
+		port->sm_churn_partner_timer_counter =
 			__ad_timer_to_ticks(AD_PARTNER_CHURN_TIMER, 0);
 		return;
 	}
@@ -2128,7 +2128,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
 				if ((new_aggregator->lag_ports == port) &&
 				    new_aggregator->is_active) {
 					netdev_info(bond->dev, "Removing an active aggregator\n");
-					select_new_active_agg = 1; 
+					select_new_active_agg = 1;
 				}
 
 				new_aggregator->is_individual = aggregator->is_individual;
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index e82108c917a6..9431127bbc60 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -1031,7 +1031,7 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[],
 	 */
 	memcpy(ss.__data, addr, len);
 	ss.ss_family = dev->type;
-	if (dev_set_mac_address(dev, (struct sockaddr *)&ss)) {
+	if (dev_set_mac_address(dev, (struct sockaddr *)&ss, NULL)) {
 		netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
 			   dev->name);
 		return -EOPNOTSUPP;
@@ -1250,7 +1250,7 @@ static int alb_set_mac_address(struct bonding *bond, void *addr)
 		bond_hw_addr_copy(tmp_addr, slave->dev->dev_addr,
 				  slave->dev->addr_len);
 
-		res = dev_set_mac_address(slave->dev, addr);
+		res = dev_set_mac_address(slave->dev, addr, NULL);
 
 		/* restore net_device's hw address */
 		bond_hw_addr_copy(slave->dev->dev_addr, tmp_addr,
@@ -1273,7 +1273,7 @@ unwind:
 		bond_hw_addr_copy(tmp_addr, rollback_slave->dev->dev_addr,
 				  rollback_slave->dev->addr_len);
 		dev_set_mac_address(rollback_slave->dev,
-				    (struct sockaddr *)&ss);
+				    (struct sockaddr *)&ss, NULL);
 		bond_hw_addr_copy(rollback_slave->dev->dev_addr, tmp_addr,
 				  rollback_slave->dev->addr_len);
 	}
@@ -1732,7 +1732,8 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
 				  bond->dev->addr_len);
 		ss.ss_family = bond->dev->type;
 		/* we don't care if it can't change its mac, best effort */
-		dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss);
+		dev_set_mac_address(new_slave->dev, (struct sockaddr *)&ss,
+				    NULL);
 
 		bond_hw_addr_copy(new_slave->dev->dev_addr, tmp_addr,
 				  new_slave->dev->addr_len);
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 3868e1a5126d..1360f1ffe070 100644
--- a/drivers/net/bonding/bond_debugfs.c
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -45,19 +45,7 @@ static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
 
 	return 0;
 }
-
-static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, bond_debug_rlb_hash_show, inode->i_private);
-}
-
-static const struct file_operations bond_debug_rlb_hash_fops = {
-	.owner		= THIS_MODULE,
-	.open		= bond_debug_rlb_hash_open,
-	.read		= seq_read,
-	.llseek		= seq_lseek,
-	.release	= single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(bond_debug_rlb_hash);
 
 void bond_debug_register(struct bonding *bond)
 {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 333387f1f1fe..a9d597f28023 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -609,14 +609,21 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
  *
  * Should be called with RTNL held.
 */
-static void bond_set_dev_addr(struct net_device *bond_dev,
-			      struct net_device *slave_dev)
+static int bond_set_dev_addr(struct net_device *bond_dev,
+			     struct net_device *slave_dev)
 {
+	int err;
+
 	netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->name=%s slave_dev->addr_len=%d\n",
 		   bond_dev, slave_dev, slave_dev->name, slave_dev->addr_len);
+	err = dev_pre_changeaddr_notify(bond_dev, slave_dev->dev_addr, NULL);
+	if (err)
+		return err;
+
 	memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
 	bond_dev->addr_assign_type = NET_ADDR_STOLEN;
 	call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
+	return 0;
 }
 
 static struct slave *bond_get_old_active(struct bonding *bond,
@@ -652,8 +659,12 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 
 	switch (bond->params.fail_over_mac) {
 	case BOND_FOM_ACTIVE:
-		if (new_active)
-			bond_set_dev_addr(bond->dev, new_active->dev);
+		if (new_active) {
+			rv = bond_set_dev_addr(bond->dev, new_active->dev);
+			if (rv)
+				netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
+					   -rv, bond->dev->name);
+		}
 		break;
 	case BOND_FOM_FOLLOW:
 		/* if new_active && old_active, swap them
@@ -680,7 +691,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 			}
 
 			rv = dev_set_mac_address(new_active->dev,
-						 (struct sockaddr *)&ss);
+						 (struct sockaddr *)&ss, NULL);
 			if (rv) {
 				netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
 					   -rv, new_active->dev->name);
@@ -695,7 +706,7 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 			ss.ss_family = old_active->dev->type;
 
 			rv = dev_set_mac_address(old_active->dev,
-						 (struct sockaddr *)&ss);
+						 (struct sockaddr *)&ss, NULL);
 			if (rv)
 				netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
 					   -rv, new_active->dev->name);
@@ -1489,8 +1500,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 	 * address to be the same as the slave's.
 	 */
 	if (!bond_has_slaves(bond) &&
-	    bond->dev->addr_assign_type == NET_ADDR_RANDOM)
-		bond_set_dev_addr(bond->dev, slave_dev);
+	    bond->dev->addr_assign_type == NET_ADDR_RANDOM) {
+		res = bond_set_dev_addr(bond->dev, slave_dev);
+		if (res)
+			goto err_undo_flags;
+	}
 
 	new_slave = bond_alloc_slave(bond);
 	if (!new_slave) {
@@ -1527,7 +1541,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 		 */
 		memcpy(ss.__data, bond_dev->dev_addr, bond_dev->addr_len);
 		ss.ss_family = slave_dev->type;
-		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
+		res = dev_set_mac_address(slave_dev, (struct sockaddr *)&ss,
+					  extack);
 		if (res) {
 			netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
 			goto err_restore_mtu;
 		}
@@ -1538,7 +1553,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev,
 	slave_dev->flags |= IFF_SLAVE;
 
 	/* open the slave since the application closed it */
-	res = dev_open(slave_dev);
+	res = dev_open(slave_dev, extack);
 	if (res) {
 		netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
 		goto err_restore_mac;
@@ -1818,7 +1833,7 @@ err_restore_mac:
 		bond_hw_addr_copy(ss.__data, new_slave->perm_hwaddr,
 				  new_slave->dev->addr_len);
 		ss.ss_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
+		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
 	}
 
 err_restore_mtu:
@@ -1999,7 +2014,7 @@ static int __bond_release_one(struct net_device *bond_dev,
 		bond_hw_addr_copy(ss.__data, slave->perm_hwaddr,
 				  slave->dev->addr_len);
 		ss.ss_family = slave_dev->type;
-		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss);
+		dev_set_mac_address(slave_dev, (struct sockaddr *)&ss, NULL);
 	}
 
 	if (unregister)
@@ -3544,8 +3559,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 		break;
 	case BOND_SETHWADDR_OLD:
 	case SIOCBONDSETHWADDR:
-		bond_set_dev_addr(bond_dev, slave_dev);
-		res = 0;
+		res = bond_set_dev_addr(bond_dev, slave_dev);
 		break;
 	case BOND_CHANGE_ACTIVE_OLD:
 	case SIOCBONDCHANGEACTIVE:
@@ -3732,7 +3746,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
 	bond_for_each_slave(bond, slave, iter) {
 		netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
-		res = dev_set_mac_address(slave->dev, addr);
+		res = dev_set_mac_address(slave->dev, addr, NULL);
 		if (res) {
 			/* TODO: consider downing the slave
 			 * and retry ?
@@ -3761,7 +3775,7 @@ unwind:
 			break;
 
 		tmp_res = dev_set_mac_address(rollback_slave->dev,
-					      (struct sockaddr *)&tmp_ss);
+					      (struct sockaddr *)&tmp_ss, NULL);
 		if (tmp_res) {
 			netdev_dbg(bond_dev, "unwind err %d dev %s\n",
 				   tmp_res, rollback_slave->dev->name);
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 7cdd0cead693..e0f0ad7a550a 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -96,7 +96,7 @@ config CAN_AT91
 
 config CAN_FLEXCAN
 	tristate "Support for Freescale FLEXCAN based chips"
-	depends on ARM || PPC
+	depends on OF && HAS_IOMEM
 	---help---
 	  Say Y here if you want to support for Freescale FlexCAN.
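The bonding hunks above follow a tree-wide API change: dev_open() and dev_set_mac_address() now take a struct netlink_ext_ack * so that failures can carry a message back to the rtnetlink caller, while paths with no netlink context simply pass NULL. A minimal sketch of a caller threading the extack through; the helper name and its error string are illustrative, not part of this diff:

#include <linux/netdevice.h>
#include <linux/netlink.h>

/* Illustrative only: bring up a lower device on behalf of an upper one,
 * forwarding the rtnetlink extack so userspace sees why a failure
 * happened. Rollback paths with no extack available pass NULL, exactly
 * as the bonding hunks above do. NL_SET_ERR_MSG() is NULL-safe.
 */
static int example_open_lower(struct net_device *lower,
			      struct netlink_ext_ack *extack)
{
	int err;

	err = dev_open(lower, extack);
	if (err)
		NL_SET_ERR_MSG(extack, "example: failed to open lower device");

	return err;
}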
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 75ce11395ee8..0f36eafe3ac1 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -19,11 +19,13 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>
+#include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
 
 #define DRV_NAME			"flexcan"
 
@@ -131,16 +133,15 @@
 	 (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
 #define FLEXCAN_ESR_ALL_INT \
 	(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
-	 FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+	 FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT | \
+	 FLEXCAN_ESR_WAK_INT)
 
 /* FLEXCAN interrupt flag register (IFLAG) bits */
 /* Errata ERR005829 step7: Reserve first valid MB */
 #define FLEXCAN_TX_MB_RESERVED_OFF_FIFO		8
 #define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP	0
-#define FLEXCAN_TX_MB				63
 #define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST	(FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP + 1)
-#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST	(FLEXCAN_TX_MB - 1)
-#define FLEXCAN_IFLAG_MB(x)		BIT(x & 0x1f)
+#define FLEXCAN_IFLAG_MB(x)		BIT((x) & 0x1f)
 #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW	BIT(7)
 #define FLEXCAN_IFLAG_RX_FIFO_WARN	BIT(6)
 #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE	BIT(5)
@@ -189,12 +190,13 @@
 #define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
 #define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
 #define FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN	BIT(7) /* default to BE register access */
+#define FLEXCAN_QUIRK_SETUP_STOP_MODE		BIT(8) /* Setup stop mode to support wakeup */
 
 /* Structure of the message buffer */
 struct flexcan_mb {
 	u32 can_ctrl;
 	u32 can_id;
-	u32 data[2];
+	u32 data[];
 };
 
 /* Structure of the hardware registers */
@@ -223,7 +225,7 @@ struct flexcan_regs {
 	u32 rxfgmask;		/* 0x48 */
 	u32 rxfir;		/* 0x4c */
 	u32 _reserved3[12];	/* 0x50 */
-	struct flexcan_mb mb[64];	/* 0x80 */
+	u8 mb[2][512];		/* 0x80 */
 	/* FIFO-mode:
 	 *			MB
 	 * 0x080...0x08f	0	RX message buffer
@@ -253,12 +255,24 @@ struct flexcan_devtype_data {
 	u32 quirks;		/* quirks needed for different IP cores */
 };
 
+struct flexcan_stop_mode {
+	struct regmap *gpr;
+	u8 req_gpr;
+	u8 req_bit;
+	u8 ack_gpr;
+	u8 ack_bit;
+};
+
 struct flexcan_priv {
 	struct can_priv can;
 	struct can_rx_offload offload;
 
 	struct flexcan_regs __iomem *regs;
+	struct flexcan_mb __iomem *tx_mb;
 	struct flexcan_mb __iomem *tx_mb_reserved;
+	u8 tx_mb_idx;
+	u8 mb_count;
+	u8 mb_size;
 	u32 reg_ctrl_default;
 	u32 reg_imask1_default;
 	u32 reg_imask2_default;
@@ -267,6 +281,7 @@ struct flexcan_priv {
 	struct clk *clk_per;
 	const struct flexcan_devtype_data *devtype_data;
 	struct regulator *reg_xceiver;
+	struct flexcan_stop_mode stm;
 
 	/* Read and Write APIs */
 	u32 (*read)(void __iomem *addr);
@@ -290,7 +305,8 @@ static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
 
 static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
 	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
-		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+		FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE |
+		FLEXCAN_QUIRK_SETUP_STOP_MODE,
 };
 
 static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
@@ -350,6 +366,68 @@ static inline void flexcan_write_le(u32 val, void __iomem *addr)
 	iowrite32(val, addr);
 }
 
+static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv,
+						 u8 mb_index)
+{
+	u8 bank_size;
+	bool bank;
+
+	if (WARN_ON(mb_index >= priv->mb_count))
+		return NULL;
+
+	bank_size = sizeof(priv->regs->mb[0]) / priv->mb_size;
+
+	bank = mb_index >= bank_size;
+	if (bank)
+		mb_index -= bank_size;
+
+	return (struct flexcan_mb __iomem *)
+		(&priv->regs->mb[bank][priv->mb_size * mb_index]);
+}
+
+static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_mcr;
+
+	reg_mcr = priv->read(&regs->mcr);
+
+	if (enable)
+		reg_mcr |= FLEXCAN_MCR_WAK_MSK;
+	else
+		reg_mcr &= ~FLEXCAN_MCR_WAK_MSK;
+
+	priv->write(reg_mcr, &regs->mcr);
+}
+
+static inline void flexcan_enter_stop_mode(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_mcr;
+
+	reg_mcr = priv->read(&regs->mcr);
+	reg_mcr |= FLEXCAN_MCR_SLF_WAK;
+	priv->write(reg_mcr, &regs->mcr);
+
+	/* enable stop request */
+	regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+			   1 << priv->stm.req_bit, 1 << priv->stm.req_bit);
+}
+
+static inline void flexcan_exit_stop_mode(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_mcr;
+
+	/* remove stop request */
+	regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr,
+			   1 << priv->stm.req_bit, 0);
+
+	reg_mcr = priv->read(&regs->mcr);
+	reg_mcr &= ~FLEXCAN_MCR_SLF_WAK;
+	priv->write(reg_mcr, &regs->mcr);
+}
+
 static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
 {
 	struct flexcan_regs __iomem *regs = priv->regs;
@@ -512,11 +590,11 @@ static int flexcan_get_berr_counter(const struct net_device *dev,
 static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	const struct flexcan_priv *priv = netdev_priv(dev);
-	struct flexcan_regs __iomem *regs = priv->regs;
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	u32 can_id;
 	u32 data;
 	u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+	int i;
 
 	if (can_dropped_invalid_skb(dev, skb))
 		return NETDEV_TX_OK;
@@ -533,27 +611,23 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de
 	if (cf->can_id & CAN_RTR_FLAG)
 		ctrl |= FLEXCAN_MB_CNT_RTR;
 
-	if (cf->can_dlc > 0) {
-		data = be32_to_cpup((__be32 *)&cf->data[0]);
-		priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[0]);
-	}
-	if (cf->can_dlc > 4) {
-		data = be32_to_cpup((__be32 *)&cf->data[4]);
-		priv->write(data, &regs->mb[FLEXCAN_TX_MB].data[1]);
+	for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+		data = be32_to_cpup((__be32 *)&cf->data[i]);
+		priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]);
 	}
 
 	can_put_echo_skb(skb, dev, 0);
 
-	priv->write(can_id, &regs->mb[FLEXCAN_TX_MB].can_id);
-	priv->write(ctrl, &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+	priv->write(can_id, &priv->tx_mb->can_id);
+	priv->write(ctrl, &priv->tx_mb->can_ctrl);
 
 	/* Errata ERR005829 step8:
 	 * Write twice INACTIVE(0x8) code to first MB.
 	 */
 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		    &priv->tx_mb_reserved->can_ctrl);
+		    &priv->tx_mb_reserved->can_ctrl);
 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		    &priv->tx_mb_reserved->can_ctrl);
+		    &priv->tx_mb_reserved->can_ctrl);
 
 	return NETDEV_TX_OK;
 }
@@ -672,8 +746,11 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 {
 	struct flexcan_priv *priv = rx_offload_to_priv(offload);
 	struct flexcan_regs __iomem *regs = priv->regs;
-	struct flexcan_mb __iomem *mb = &regs->mb[n];
+	struct flexcan_mb __iomem *mb;
 	u32 reg_ctrl, reg_id, reg_iflag1;
+	int i;
+
+	mb = flexcan_get_mb(priv, n);
 
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		u32 code;
@@ -714,8 +791,10 @@ static unsigned int flexcan_mailbox_read(struct can_rx_offload *offload,
 		cf->can_id |= CAN_RTR_FLAG;
 	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
 
-	*(__be32 *)(cf->data + 0) = cpu_to_be32(priv->read(&mb->data[0]));
-	*(__be32 *)(cf->data + 4) = cpu_to_be32(priv->read(&mb->data[1]));
+	for (i = 0; i < cf->can_dlc; i += sizeof(u32)) {
+		__be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)]));
+		*(__be32 *)(cf->data + i) = data;
+	}
 
 	/* mark as read */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
@@ -744,7 +823,7 @@ static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
 	u32 iflag1, iflag2;
 
 	iflag2 = priv->read(&regs->iflag2) & priv->reg_imask2_default &
-		 ~FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB);
+		 ~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
 	iflag1 = priv->read(&regs->iflag1) & priv->reg_imask1_default;
 
 	return (u64)iflag2 << 32 | iflag1;
@@ -794,8 +873,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	reg_iflag2 = priv->read(&regs->iflag2);
 
 	/* transmission complete interrupt */
-	if (reg_iflag2 & FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB)) {
-		u32 reg_ctrl = priv->read(&regs->mb[FLEXCAN_TX_MB].can_ctrl);
+	if (reg_iflag2 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+		u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl);
 
 		handled = IRQ_HANDLED;
 		stats->tx_bytes += can_rx_offload_get_echo_skb(&priv->offload,
@@ -805,8 +884,8 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 
 		/* after sending a RTR frame MB is in RX mode */
 		priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-			    &regs->mb[FLEXCAN_TX_MB].can_ctrl);
-		priv->write(FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB), &regs->iflag2);
+			    &priv->tx_mb->can_ctrl);
+		priv->write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag2);
 		netif_wake_queue(dev);
 	}
 
@@ -821,7 +900,7 @@ static irqreturn_t flexcan_irq(int irq, void *dev_id)
 	/* state change interrupt or broken error state quirk fix is enabled */
 	if ((reg_esr & FLEXCAN_ESR_ERR_STATE) ||
 	    (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
-	                                   FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
+					   FLEXCAN_QUIRK_BROKEN_PERR_STATE)))
 		flexcan_irq_state(dev, reg_esr);
 
 	/* bus error IRQ - handle if bus error reporting is activated */
@@ -919,6 +998,7 @@ static int flexcan_chip_start(struct net_device *dev)
 	struct flexcan_regs __iomem *regs = priv->regs;
 	u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
 	int err, i;
+	struct flexcan_mb __iomem *mb;
 
 	/* enable module */
 	err = flexcan_chip_enable(priv);
@@ -935,11 +1015,9 @@ static int flexcan_chip_start(struct net_device *dev)
 	/* MCR
 	 *
 	 * enable freeze
-	 * enable fifo
 	 * halt now
 	 * only supervisor access
 	 * enable warning int
-	 * disable local echo
 	 * enable individual RX masking
 	 * choose format C
 	 * set max mailbox number
@@ -947,14 +1025,37 @@ static int flexcan_chip_start(struct net_device *dev)
 	reg_mcr = priv->read(&regs->mcr);
 	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
 	reg_mcr |= FLEXCAN_MCR_FRZ |
 		FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
-		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
-		FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(FLEXCAN_TX_MB);
+		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C |
+		FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
 
+	/* MCR
+	 *
+	 * FIFO:
+	 * - disable for timestamp mode
+	 * - enable for FIFO mode
+	 */
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
 		reg_mcr &= ~FLEXCAN_MCR_FEN;
 	else
 		reg_mcr |= FLEXCAN_MCR_FEN;
 
+	/* MCR
+	 *
+	 * NOTE: In loopback mode, the CAN_MCR[SRXDIS] cannot be
+	 *       asserted because this will impede the self reception
+	 *       of a transmitted message. This is not documented in
+	 *       earlier versions of flexcan block guide.
+	 *
+	 * Self Reception:
+	 * - enable Self Reception for loopback mode
+	 *   (by clearing "Self Reception Disable" bit)
+	 * - disable for normal operation
+	 */
+	if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+		reg_mcr &= ~FLEXCAN_MCR_SRX_DIS;
+	else
+		reg_mcr |= FLEXCAN_MCR_SRX_DIS;
+
 	netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
 	priv->write(reg_mcr, &regs->mcr);
@@ -999,14 +1100,16 @@ static int flexcan_chip_start(struct net_device *dev)
 	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
 		for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) {
+			mb = flexcan_get_mb(priv, i);
 			priv->write(FLEXCAN_MB_CODE_RX_EMPTY,
-				    &regs->mb[i].can_ctrl);
+				    &mb->can_ctrl);
 		}
 	} else {
 		/* clear and invalidate unused mailboxes first */
-		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= ARRAY_SIZE(regs->mb); i++) {
+		for (i = FLEXCAN_TX_MB_RESERVED_OFF_FIFO; i <= priv->mb_count; i++) {
+			mb = flexcan_get_mb(priv, i);
 			priv->write(FLEXCAN_MB_CODE_RX_INACTIVE,
-				    &regs->mb[i].can_ctrl);
+				    &mb->can_ctrl);
 		}
 	}
 
@@ -1016,7 +1119,7 @@ static int flexcan_chip_start(struct net_device *dev)
 
 	/* mark TX mailbox as INACTIVE */
 	priv->write(FLEXCAN_MB_CODE_TX_INACTIVE,
-		    &regs->mb[FLEXCAN_TX_MB].can_ctrl);
+		    &priv->tx_mb->can_ctrl);
 
 	/* acceptance mask/acceptance code (accept everything) */
 	priv->write(0x0, &regs->rxgmask);
@@ -1027,7 +1130,7 @@ static int flexcan_chip_start(struct net_device *dev)
 		priv->write(0x0, &regs->rxfgmask);
 
 	/* clear acceptance filters */
-	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
+	for (i = 0; i < priv->mb_count; i++)
 		priv->write(0, &regs->rximr[i]);
 
 	/* On Vybrid, disable memory error detection interrupts
@@ -1128,10 +1231,49 @@ static int flexcan_open(struct net_device *dev)
 	if (err)
 		goto out_close;
 
+	priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN;
+	priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) +
+			 (sizeof(priv->regs->mb[1]) / priv->mb_size);
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP)
+		priv->tx_mb_reserved =
+			flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP);
+	else
+		priv->tx_mb_reserved =
+			flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_OFF_FIFO);
+	priv->tx_mb_idx = priv->mb_count - 1;
+	priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx);
+
+	priv->reg_imask1_default = 0;
+	priv->reg_imask2_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+
+	priv->offload.mailbox_read = flexcan_mailbox_read;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		u64 imask;
+
+		priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+		priv->offload.mb_last = priv->mb_count - 2;
+
+		imask = GENMASK_ULL(priv->offload.mb_last,
+				    priv->offload.mb_first);
+		priv->reg_imask1_default |= imask;
+		priv->reg_imask2_default |= imask >> 32;
+
+		err = can_rx_offload_add_timestamp(dev, &priv->offload);
+	} else {
+
priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | + FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; + err = can_rx_offload_add_fifo(dev, &priv->offload, + FLEXCAN_NAPI_WEIGHT); + } + if (err) + goto out_free_irq; + /* start chip and queuing */ err = flexcan_chip_start(dev); if (err) - goto out_free_irq; + goto out_offload_del; can_led_event(dev, CAN_LED_EVENT_OPEN); @@ -1140,6 +1282,8 @@ static int flexcan_open(struct net_device *dev) return 0; + out_offload_del: + can_rx_offload_del(&priv->offload); out_free_irq: free_irq(dev->irq, dev); out_close: @@ -1160,6 +1304,7 @@ static int flexcan_close(struct net_device *dev) can_rx_offload_disable(&priv->offload); flexcan_chip_stop(dev); + can_rx_offload_del(&priv->offload); free_irq(dev->irq, dev); clk_disable_unprepare(priv->clk_per); clk_disable_unprepare(priv->clk_ipg); @@ -1260,6 +1405,59 @@ static void unregister_flexcandev(struct net_device *dev) unregister_candev(dev); } +static int flexcan_setup_stop_mode(struct platform_device *pdev) +{ + struct net_device *dev = platform_get_drvdata(pdev); + struct device_node *np = pdev->dev.of_node; + struct device_node *gpr_np; + struct flexcan_priv *priv; + phandle phandle; + u32 out_val[5]; + int ret; + + if (!np) + return -EINVAL; + + /* stop mode property format is: + * <&gpr req_gpr req_bit ack_gpr ack_bit>. + */ + ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, + ARRAY_SIZE(out_val)); + if (ret) { + dev_dbg(&pdev->dev, "no stop-mode property\n"); + return ret; + } + phandle = *out_val; + + gpr_np = of_find_node_by_phandle(phandle); + if (!gpr_np) { + dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); + return PTR_ERR(gpr_np); + } + + priv = netdev_priv(dev); + priv->stm.gpr = syscon_node_to_regmap(gpr_np); + of_node_put(gpr_np); + if (IS_ERR(priv->stm.gpr)) { + dev_dbg(&pdev->dev, "could not find gpr regmap\n"); + return PTR_ERR(priv->stm.gpr); + } + + priv->stm.req_gpr = out_val[1]; + priv->stm.req_bit = out_val[2]; + priv->stm.ack_gpr = out_val[3]; + priv->stm.ack_bit = out_val[4]; + + dev_dbg(&pdev->dev, + "gpr %s req_gpr=0x02%x req_bit=%u ack_gpr=0x02%x ack_bit=%u\n", + gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit, + priv->stm.ack_gpr, priv->stm.ack_bit); + + device_set_wakeup_capable(&pdev->dev, true); + + return 0; +} + static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, @@ -1371,35 +1569,6 @@ static int flexcan_probe(struct platform_device *pdev) priv->devtype_data = devtype_data; priv->reg_xceiver = reg_xceiver; - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) - priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP]; - else - priv->tx_mb_reserved = ®s->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO]; - - priv->reg_imask1_default = 0; - priv->reg_imask2_default = FLEXCAN_IFLAG_MB(FLEXCAN_TX_MB); - - priv->offload.mailbox_read = flexcan_mailbox_read; - - if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) { - u64 imask; - - priv->offload.mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST; - priv->offload.mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST; - - imask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first); - priv->reg_imask1_default |= imask; - priv->reg_imask2_default |= imask >> 32; - - err = can_rx_offload_add_timestamp(dev, &priv->offload); - } else { - priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | - FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; - err = 
can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT); - } - if (err) - goto failed_offload; - err = register_flexcandev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); @@ -1408,12 +1577,17 @@ static int flexcan_probe(struct platform_device *pdev) devm_can_led_init(dev); + if (priv->devtype_data->quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE) { + err = flexcan_setup_stop_mode(pdev); + if (err) + dev_dbg(&pdev->dev, "failed to setup stop-mode\n"); + } + dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->regs, dev->irq); return 0; - failed_offload: failed_register: free_candev(dev); return err; @@ -1422,10 +1596,8 @@ static int flexcan_probe(struct platform_device *pdev) static int flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); - struct flexcan_priv *priv = netdev_priv(dev); unregister_flexcandev(dev); - can_rx_offload_del(&priv->offload); free_candev(dev); return 0; @@ -1438,9 +1610,17 @@ static int __maybe_unused flexcan_suspend(struct device *device) int err; if (netif_running(dev)) { - err = flexcan_chip_disable(priv); - if (err) - return err; + /* if wakeup is enabled, enter stop mode + * else enter disabled mode. + */ + if (device_may_wakeup(device)) { + enable_irq_wake(dev->irq); + flexcan_enter_stop_mode(priv); + } else { + err = flexcan_chip_disable(priv); + if (err) + return err; + } netif_stop_queue(dev); netif_device_detach(dev); } @@ -1459,14 +1639,45 @@ static int __maybe_unused flexcan_resume(struct device *device) if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); - err = flexcan_chip_enable(priv); - if (err) - return err; + if (device_may_wakeup(device)) { + disable_irq_wake(dev->irq); + } else { + err = flexcan_chip_enable(priv); + if (err) + return err; + } } return 0; } -static SIMPLE_DEV_PM_OPS(flexcan_pm_ops, flexcan_suspend, flexcan_resume); +static int __maybe_unused flexcan_noirq_suspend(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + if (netif_running(dev) && device_may_wakeup(device)) + flexcan_enable_wakeup_irq(priv, true); + + return 0; +} + +static int __maybe_unused flexcan_noirq_resume(struct device *device) +{ + struct net_device *dev = dev_get_drvdata(device); + struct flexcan_priv *priv = netdev_priv(dev); + + if (netif_running(dev) && device_may_wakeup(device)) { + flexcan_enable_wakeup_irq(priv, false); + flexcan_exit_stop_mode(priv); + } + + return 0; +} + +static const struct dev_pm_ops flexcan_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume) + SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume) +}; static struct platform_driver flexcan_driver = { .driver = { diff --git a/drivers/net/can/rcar/Kconfig b/drivers/net/can/rcar/Kconfig index 7b03a3a37db7..bd5a8fcd83e1 100644 --- a/drivers/net/can/rcar/Kconfig +++ b/drivers/net/can/rcar/Kconfig @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 config CAN_RCAR tristate "Renesas R-Car CAN controller" depends on ARCH_RENESAS || ARM diff --git a/drivers/net/can/rcar/Makefile b/drivers/net/can/rcar/Makefile index 08de36a4cfcc..c9185b0c04a8 100644 --- a/drivers/net/can/rcar/Makefile +++ b/drivers/net/can/rcar/Makefile @@ -1,3 +1,4 @@ +# SPDX-License-Identifier: GPL-2.0 # # Makefile for the Renesas R-Car CAN & CAN FD controller drivers # diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 771a46083739..13e66297b65f 100644 --- 
a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -1,12 +1,8 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Renesas R-Car CAN device driver * * Copyright (C) 2013 Cogent Embedded, Inc. <source@cogentembedded.com> * Copyright (C) 2013 Renesas Solutions Corp. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ #include <linux/module.h> diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 602c19e23f05..05410008aa6b 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -1,11 +1,7 @@ +// SPDX-License-Identifier: GPL-2.0+ /* Renesas R-Car CAN FD device driver * * Copyright (C) 2015 Renesas Electronics Corp. - * - * This program is free software; you can redistribute it and/or modify it - * under the terms of the GNU General Public License as published by the - * Free Software Foundation; either version 2 of the License, or (at your - * option) any later version. */ /* The R-Car CAN FD controller can operate in either one of the below two modes diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig index 1e65cb6c2591..f6dc89927ece 100644 --- a/drivers/net/can/sja1000/Kconfig +++ b/drivers/net/can/sja1000/Kconfig @@ -88,6 +88,7 @@ config CAN_PLX_PCI - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/) - IXXAT Automation PC-I 04/PCI card (http://www.ixxat.com/) - Connect Tech Inc. CANpro/104-Plus Opto (CRG001) card (http://www.connecttech.com) + - ASEM CAN raw - 2 isolated CAN channels (www.asem.it) config CAN_TSCAN1 tristate "TS-CAN1 PC104 boards" diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c index f8ff25c8ee2e..9bcdefea138a 100644 --- a/drivers/net/can/sja1000/plx_pci.c +++ b/drivers/net/can/sja1000/plx_pci.c @@ -46,7 +46,8 @@ MODULE_SUPPORTED_DEVICE("Adlink PCI-7841/cPCI-7841, " "esd CAN-PCIe/2000, " "Connect Tech Inc. CANpro/104-Plus Opto (CRG001), " "IXXAT PC-I 04/PCI, " - "ELCUS CAN-200-PCI") + "ELCUS CAN-200-PCI, " + "ASEM DUAL CAN-RAW") MODULE_LICENSE("GPL v2"); #define PLX_PCI_MAX_CHAN 2 @@ -70,7 +71,9 @@ struct plx_pci_card { */ #define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */ +#define PLX_LINT1_POL (1 << 1) /* Local interrupt 1 polarity */ #define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */ +#define PLX_LINT2_POL (1 << 4) /* Local interrupt 2 polarity */ #define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */ #define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */ @@ -92,6 +95,9 @@ struct plx_pci_card { */ #define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) +/* OCR setting for ASEM Dual CAN raw */ +#define ASEM_PCI_OCR 0xfe + /* * In the CDR register, you should set CBP to 1. 
* You will probably also want to set the clock divider value to 7 @@ -145,10 +151,20 @@ struct plx_pci_card { #define MOXA_PCI_VENDOR_ID 0x1393 #define MOXA_PCI_DEVICE_ID 0x0100 +#define ASEM_RAW_CAN_VENDOR_ID 0x10b5 +#define ASEM_RAW_CAN_DEVICE_ID 0x9030 +#define ASEM_RAW_CAN_SUB_VENDOR_ID 0x3000 +#define ASEM_RAW_CAN_SUB_DEVICE_ID 0x1001 +#define ASEM_RAW_CAN_SUB_DEVICE_ID_BIS 0x1002 +#define ASEM_RAW_CAN_RST_REGISTER 0x54 +#define ASEM_RAW_CAN_RST_MASK_CAN1 0x20 +#define ASEM_RAW_CAN_RST_MASK_CAN2 0x04 + static void plx_pci_reset_common(struct pci_dev *pdev); static void plx9056_pci_reset_common(struct pci_dev *pdev); static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev); +static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev); struct plx_pci_channel_map { u32 bar; @@ -269,6 +285,14 @@ static struct plx_pci_card_info plx_pci_card_info_moxa = { /* based on PLX9052 */ }; +static struct plx_pci_card_info plx_pci_card_info_asem_dual_can = { + "ASEM Dual CAN raw PCI", 2, + PLX_PCI_CAN_CLOCK, ASEM_PCI_OCR, PLX_PCI_CDR, + {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} }, + &plx_pci_reset_asem_dual_can_raw + /* based on PLX9030 */ +}; + static const struct pci_device_id plx_pci_tbl[] = { { /* Adlink PCI-7841/cPCI-7841 */ @@ -375,6 +399,20 @@ static const struct pci_device_id plx_pci_tbl[] = { 0, 0, (kernel_ulong_t)&plx_pci_card_info_moxa }, + { + /* ASEM Dual CAN raw */ + ASEM_RAW_CAN_VENDOR_ID, ASEM_RAW_CAN_DEVICE_ID, + ASEM_RAW_CAN_SUB_VENDOR_ID, ASEM_RAW_CAN_SUB_DEVICE_ID, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_asem_dual_can + }, + { + /* ASEM Dual CAN raw -new model */ + ASEM_RAW_CAN_VENDOR_ID, ASEM_RAW_CAN_DEVICE_ID, + ASEM_RAW_CAN_SUB_VENDOR_ID, ASEM_RAW_CAN_SUB_DEVICE_ID_BIS, + 0, 0, + (kernel_ulong_t)&plx_pci_card_info_asem_dual_can + }, { 0,} }; MODULE_DEVICE_TABLE(pci, plx_pci_tbl); @@ -524,6 +562,31 @@ static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev) } } +/* Special reset function for ASEM Dual CAN raw card */ +static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev) +{ + void __iomem *bar0_addr; + u8 tmpval; + + plx_pci_reset_common(pdev); + + bar0_addr = pci_iomap(pdev, 0, 0); + if (!bar0_addr) { + dev_err(&pdev->dev, "Failed to remap reset space 0 (BAR0)\n"); + return; + } + + /* reset the two SJA1000 chips */ + tmpval = ioread8(bar0_addr + ASEM_RAW_CAN_RST_REGISTER); + tmpval &= ~(ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2); + iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER); + usleep_range(300, 400); + tmpval |= ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2; + iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER); + usleep_range(300, 400); + pci_iounmap(pdev, bar0_addr); +} + static void plx_pci_del_card(struct pci_dev *pdev) { struct plx_pci_card *card = pci_get_drvdata(pdev); diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index f3d5bda012a1..04aac3bb54ef 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -715,7 +715,7 @@ static void ucan_read_bulk_callback(struct urb *urb) up->in_ep_size, urb->transfer_buffer, urb->transfer_dma); - netdev_dbg(up->netdev, "not resumbmitting urb; status: %d\n", + netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n", urb->status); return; default: diff --git a/drivers/net/can/vxcan.c b/drivers/net/can/vxcan.c index ed6828821fbd..80af658e530d 100644 --- a/drivers/net/can/vxcan.c +++ b/drivers/net/can/vxcan.c @@ -207,7 +207,7 @@ static int 
vxcan_newlink(struct net *net, struct net_device *dev, return PTR_ERR(peer_net); peer = rtnl_create_link(peer_net, ifname, name_assign_type, - &vxcan_link_ops, tbp); + &vxcan_link_ops, tbp, extack); if (IS_ERR(peer)) { put_net(peer_net); return PTR_ERR(peer); diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 045f0845e665..97d0933d9bd9 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -63,6 +63,7 @@ enum xcan_reg { XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ + XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) @@ -75,6 +76,8 @@ enum xcan_reg { XCAN_CANFD_FRAME_SIZE * (n)) #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \ XCAN_CANFD_FRAME_SIZE * (n)) +#define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \ + XCAN_CANFD_FRAME_SIZE * (n)) /* the single TX mailbox used by this driver on CAN FD HW */ #define XCAN_TX_MAILBOX_IDX 0 @@ -152,6 +155,7 @@ enum xcan_reg { * instead of the regular FIFO at 0x50 */ #define XCAN_FLAG_RX_FIFO_MULTI 0x0010 +#define XCAN_FLAG_CANFD_2 0x0020 struct xcan_devtype_data { unsigned int flags; @@ -221,6 +225,18 @@ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .brp_inc = 1, }; +static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { + .name = DRIVER_NAME, + .tseg1_min = 1, + .tseg1_max = 256, + .tseg2_min = 1, + .tseg2_max = 128, + .sjw_max = 128, + .brp_min = 1, + .brp_max = 256, + .brp_inc = 1, +}; + /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure @@ -612,7 +628,7 @@ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full */ -static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) +static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); int ret; @@ -973,7 +989,10 @@ static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv) if (!(fsr & XCAN_FSR_FL_MASK)) return -ENOENT; - offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + if (priv->devtype.flags & XCAN_FLAG_CANFD_2) + offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); + else + offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK); } else { /* check if RX FIFO is empty */ @@ -1430,11 +1449,24 @@ static const struct xcan_devtype_data xcan_canfd_data = { .bus_clk_name = "s_axi_aclk", }; +static const struct xcan_devtype_data xcan_canfd2_data = { + .flags = XCAN_FLAG_EXT_FILTERS | + XCAN_FLAG_RXMNF | + XCAN_FLAG_TX_MAILBOXES | + XCAN_FLAG_CANFD_2 | + XCAN_FLAG_RX_FIFO_MULTI, + .bittiming_const = &xcan_bittiming_const_canfd2, + .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, + .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, + .bus_clk_name = "s_axi_aclk", +}; + /* Match table for OF platform binding */ static const struct of_device_id xcan_of_match[] = { { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data }, { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data }, + { .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xcan_of_match); diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index 
2eb68769562c..aa4a1f5206f1 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -710,6 +710,10 @@ static int bcm_sf2_sw_resume(struct dsa_switch *ds) return ret; } + ret = bcm_sf2_cfp_resume(ds); + if (ret) + return ret; + if (priv->hw_params.num_gphy == 1) bcm_sf2_gphy_enable_set(ds, true); @@ -1061,6 +1065,7 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) spin_lock_init(&priv->indir_lock); mutex_init(&priv->stats_mutex); mutex_init(&priv->cfp.lock); + INIT_LIST_HEAD(&priv->cfp.rules_list); /* CFP rule #0 cannot be used for specific classifications, flag it as * permanently used @@ -1090,12 +1095,16 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) return ret; } + bcm_sf2_gphy_enable_set(priv->dev->ds, true); + ret = bcm_sf2_mdio_register(ds); if (ret) { pr_err("failed to register MDIO bus\n"); return ret; } + bcm_sf2_gphy_enable_set(priv->dev->ds, false); + ret = bcm_sf2_cfp_rst(priv); if (ret) { pr_err("failed to reset CFP\n"); @@ -1166,6 +1175,7 @@ static int bcm_sf2_sw_remove(struct platform_device *pdev) priv->wol_ports_mask = 0; dsa_unregister_switch(priv->dev->ds); + bcm_sf2_cfp_exit(priv->dev->ds); /* Disable all ports and interrupts */ bcm_sf2_sw_suspend(priv->dev->ds); bcm_sf2_mdio_unregister(priv); diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h index cc31e986e6e3..faaef320ec48 100644 --- a/drivers/net/dsa/bcm_sf2.h +++ b/drivers/net/dsa/bcm_sf2.h @@ -56,6 +56,7 @@ struct bcm_sf2_cfp_priv { DECLARE_BITMAP(used, CFP_NUM_RULES); DECLARE_BITMAP(unique, CFP_NUM_RULES); unsigned int rules_cnt; + struct list_head rules_list; }; struct bcm_sf2_priv { @@ -213,5 +214,7 @@ int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port, int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port, struct ethtool_rxnfc *nfc); int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv); +void bcm_sf2_cfp_exit(struct dsa_switch *ds); +int bcm_sf2_cfp_resume(struct dsa_switch *ds); #endif /* __BCM_SF2_H */ diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c index 47c5f272a084..e14663ab6dbc 100644 --- a/drivers/net/dsa/bcm_sf2_cfp.c +++ b/drivers/net/dsa/bcm_sf2_cfp.c @@ -20,6 +20,12 @@ #include "bcm_sf2.h" #include "bcm_sf2_regs.h" +struct cfp_rule { + int port; + struct ethtool_rx_flow_spec fs; + struct list_head next; +}; + struct cfp_udf_slice_layout { u8 slices[UDFS_PER_SLICE]; u32 mask_value; @@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv, core_writel(priv, reg, offset); } +static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv, + int port, u32 location) +{ + struct cfp_rule *rule = NULL; + + list_for_each_entry(rule, &priv->cfp.rules_list, next) { + if (rule->port == port && rule->fs.location == location) + break; + } + + return rule; +} + +static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct cfp_rule *rule = NULL; + size_t fs_size = 0; + int ret = 1; + + if (list_empty(&priv->cfp.rules_list)) + return ret; + + list_for_each_entry(rule, &priv->cfp.rules_list, next) { + ret = 1; + if (rule->port != port) + continue; + + if (rule->fs.flow_type != fs->flow_type || + rule->fs.ring_cookie != fs->ring_cookie || + rule->fs.m_ext.data[0] != fs->m_ext.data[0]) + continue; + + switch (fs->flow_type & ~FLOW_EXT) { + case TCP_V6_FLOW: + case UDP_V6_FLOW: + fs_size = sizeof(struct ethtool_tcpip6_spec); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + fs_size = sizeof(struct ethtool_tcpip4_spec); + break; + default: + continue; 
+ } + + ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size); + ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size); + if (ret == 0) + break; + } + + return ret; +} + static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port, unsigned int port_num, unsigned int queue_num, @@ -728,27 +789,14 @@ out_err: return ret; } -static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, - struct ethtool_rx_flow_spec *fs) +static int bcm_sf2_cfp_rule_insert(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) { struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); s8 cpu_port = ds->ports[port].cpu_dp->index; __u64 ring_cookie = fs->ring_cookie; unsigned int queue_num, port_num; - int ret = -EINVAL; - - /* Check for unsupported extensions */ - if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype || - fs->m_ext.data[1])) - return -EINVAL; - - if (fs->location != RX_CLS_LOC_ANY && - test_bit(fs->location, priv->cfp.used)) - return -EBUSY; - - if (fs->location != RX_CLS_LOC_ANY && - fs->location > bcm_sf2_cfp_rule_size(priv)) - return -EINVAL; + int ret; /* This rule is a Wake-on-LAN filter and we must specifically * target the CPU port in order for it to be working. @@ -787,12 +835,54 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, queue_num, fs); break; default: + ret = -EINVAL; break; } return ret; } +static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port, + struct ethtool_rx_flow_spec *fs) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct cfp_rule *rule = NULL; + int ret = -EINVAL; + + /* Check for unsupported extensions */ + if ((fs->flow_type & FLOW_EXT) && (fs->m_ext.vlan_etype || + fs->m_ext.data[1])) + return -EINVAL; + + if (fs->location != RX_CLS_LOC_ANY && + test_bit(fs->location, priv->cfp.used)) + return -EBUSY; + + if (fs->location != RX_CLS_LOC_ANY && + fs->location > bcm_sf2_cfp_rule_size(priv)) + return -EINVAL; + + ret = bcm_sf2_cfp_rule_cmp(priv, port, fs); + if (ret == 0) + return -EEXIST; + + rule = kzalloc(sizeof(*rule), GFP_KERNEL); + if (!rule) + return -ENOMEM; + + ret = bcm_sf2_cfp_rule_insert(ds, port, fs); + if (ret) { + kfree(rule); + return ret; + } + + rule->port = port; + memcpy(&rule->fs, fs, sizeof(*fs)); + list_add_tail(&rule->next, &priv->cfp.rules_list); + + return ret; +} + static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, u32 loc, u32 *next_loc) { @@ -830,19 +920,12 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port, return 0; } -static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, - u32 loc) +static int bcm_sf2_cfp_rule_remove(struct bcm_sf2_priv *priv, int port, + u32 loc) { u32 next_loc = 0; int ret; - /* Refuse deleting unused rules, and those that are not unique since - * that could leave IPv6 rules with one of the chained rule in the - * table. 
- */ - if (!test_bit(loc, priv->cfp.unique) || loc == 0) - return -EINVAL; - ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc); if (ret) return ret; @@ -854,318 +937,54 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, return ret; } -static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) +static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port, u32 loc) { - unsigned int i; - - for (i = 0; i < sizeof(flow->m_u); i++) - flow->m_u.hdata[i] ^= 0xff; - - flow->m_ext.vlan_etype ^= cpu_to_be16(~0); - flow->m_ext.vlan_tci ^= cpu_to_be16(~0); - flow->m_ext.data[0] ^= cpu_to_be32(~0); - flow->m_ext.data[1] ^= cpu_to_be32(~0); -} - -static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv, - struct ethtool_tcpip4_spec *v4_spec, - bool mask) -{ - u32 reg, offset, ipv4; - u16 src_dst_port; - - if (mask) - offset = CORE_CFP_MASK_PORT(3); - else - offset = CORE_CFP_DATA_PORT(3); - - reg = core_readl(priv, offset); - /* src port [15:8] */ - src_dst_port = reg << 8; - - if (mask) - offset = CORE_CFP_MASK_PORT(2); - else - offset = CORE_CFP_DATA_PORT(2); - - reg = core_readl(priv, offset); - /* src port [7:0] */ - src_dst_port |= (reg >> 24); - - v4_spec->pdst = cpu_to_be16(src_dst_port); - v4_spec->psrc = cpu_to_be16((u16)(reg >> 8)); - - /* IPv4 dst [15:8] */ - ipv4 = (reg & 0xff) << 8; - - if (mask) - offset = CORE_CFP_MASK_PORT(1); - else - offset = CORE_CFP_DATA_PORT(1); - - reg = core_readl(priv, offset); - /* IPv4 dst [31:16] */ - ipv4 |= ((reg >> 8) & 0xffff) << 16; - /* IPv4 dst [7:0] */ - ipv4 |= (reg >> 24) & 0xff; - v4_spec->ip4dst = cpu_to_be32(ipv4); - - /* IPv4 src [15:8] */ - ipv4 = (reg & 0xff) << 8; - - if (mask) - offset = CORE_CFP_MASK_PORT(0); - else - offset = CORE_CFP_DATA_PORT(0); - reg = core_readl(priv, offset); + struct cfp_rule *rule; + int ret; - /* Once the TCAM is programmed, the mask reflects the slice number - * being matched, don't bother checking it when reading back the - * mask spec + /* Refuse deleting unused rules, and those that are not unique since + * that could leave IPv6 rules with one of the chained rule in the + * table. 
*/ - if (!mask && !(reg & SLICE_VALID)) + if (!test_bit(loc, priv->cfp.unique) || loc == 0) return -EINVAL; - /* IPv4 src [7:0] */ - ipv4 |= (reg >> 24) & 0xff; - /* IPv4 src [31:16] */ - ipv4 |= ((reg >> 8) & 0xffff) << 16; - v4_spec->ip4src = cpu_to_be32(ipv4); - - return 0; -} - -static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port, - struct ethtool_rx_flow_spec *fs) -{ - struct ethtool_tcpip4_spec *v4_spec = NULL, *v4_m_spec = NULL; - u32 reg; - int ret; - - reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); - - switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { - case IPPROTO_TCP: - fs->flow_type = TCP_V4_FLOW; - v4_spec = &fs->h_u.tcp_ip4_spec; - v4_m_spec = &fs->m_u.tcp_ip4_spec; - break; - case IPPROTO_UDP: - fs->flow_type = UDP_V4_FLOW; - v4_spec = &fs->h_u.udp_ip4_spec; - v4_m_spec = &fs->m_u.udp_ip4_spec; - break; - default: + rule = bcm_sf2_cfp_rule_find(priv, port, loc); + if (!rule) return -EINVAL; - } - - fs->m_ext.data[0] = cpu_to_be32((reg >> IP_FRAG_SHIFT) & 1); - v4_spec->tos = (reg >> IPTOS_SHIFT) & IPTOS_MASK; - - ret = bcm_sf2_cfp_unslice_ipv4(priv, v4_spec, false); - if (ret) - return ret; - - return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true); -} - -static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv, - __be32 *ip6_addr, __be16 *port, - bool mask) -{ - u32 reg, tmp, offset; - - /* C-Tag [31:24] - * UDF_n_B8 [23:8] (port) - * UDF_n_B7 (upper) [7:0] (addr[15:8]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(4); - else - offset = CORE_CFP_DATA_PORT(4); - reg = core_readl(priv, offset); - *port = cpu_to_be32(reg) >> 8; - tmp = (u32)(reg & 0xff) << 8; - - /* UDF_n_B7 (lower) [31:24] (addr[7:0]) - * UDF_n_B6 [23:8] (addr[31:16]) - * UDF_n_B5 (upper) [7:0] (addr[47:40]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(3); - else - offset = CORE_CFP_DATA_PORT(3); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[3] = cpu_to_be32(tmp); - tmp = (u32)(reg & 0xff) << 8; - - /* UDF_n_B5 (lower) [31:24] (addr[39:32]) - * UDF_n_B4 [23:8] (addr[63:48]) - * UDF_n_B3 (upper) [7:0] (addr[79:72]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(2); - else - offset = CORE_CFP_DATA_PORT(2); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[2] = cpu_to_be32(tmp); - tmp = (u32)(reg & 0xff) << 8; - /* UDF_n_B3 (lower) [31:24] (addr[71:64]) - * UDF_n_B2 [23:8] (addr[95:80]) - * UDF_n_B1 (upper) [7:0] (addr[111:104]) - */ - if (mask) - offset = CORE_CFP_MASK_PORT(1); - else - offset = CORE_CFP_DATA_PORT(1); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[1] = cpu_to_be32(tmp); - tmp = (u32)(reg & 0xff) << 8; + ret = bcm_sf2_cfp_rule_remove(priv, port, loc); - /* UDF_n_B1 (lower) [31:24] (addr[103:96]) - * UDF_n_B0 [23:8] (addr[127:112]) - * Reserved [7:4] - * Slice ID [3:2] - * Slice valid [1:0] - */ - if (mask) - offset = CORE_CFP_MASK_PORT(0); - else - offset = CORE_CFP_DATA_PORT(0); - reg = core_readl(priv, offset); - tmp |= (reg >> 24) & 0xff; - tmp |= (u32)((reg >> 8) << 16); - ip6_addr[0] = cpu_to_be32(tmp); + list_del(&rule->next); + kfree(rule); - if (!mask && !(reg & SLICE_VALID)) - return -EINVAL; - - return 0; + return ret; } -static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port, - struct ethtool_rx_flow_spec *fs, - u32 next_loc) +static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow) { - struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL; - 
u32 reg; - int ret; - - /* UDPv6 and TCPv6 both use ethtool_tcpip6_spec so we are fine - * assuming tcp_ip6_spec here being an union. - */ - v6_spec = &fs->h_u.tcp_ip6_spec; - v6_m_spec = &fs->m_u.tcp_ip6_spec; - - /* Read the second half first */ - ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6dst, &v6_spec->pdst, - false); - if (ret) - return ret; - - ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6dst, - &v6_m_spec->pdst, true); - if (ret) - return ret; - - /* Read last to avoid next entry clobbering the results during search - * operations. We would not have the port enabled for this rule, so - * don't bother checking it. - */ - (void)core_readl(priv, CORE_CFP_DATA_PORT(7)); - - /* The slice number is valid, so read the rule we are chained from now - * which is our first half. - */ - bcm_sf2_cfp_rule_addr_set(priv, next_loc); - ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); - if (ret) - return ret; - - reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); - - switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) { - case IPPROTO_TCP: - fs->flow_type = TCP_V6_FLOW; - break; - case IPPROTO_UDP: - fs->flow_type = UDP_V6_FLOW; - break; - default: - return -EINVAL; - } + unsigned int i; - ret = bcm_sf2_cfp_unslice_ipv6(priv, v6_spec->ip6src, &v6_spec->psrc, - false); - if (ret) - return ret; + for (i = 0; i < sizeof(flow->m_u); i++) + flow->m_u.hdata[i] ^= 0xff; - return bcm_sf2_cfp_unslice_ipv6(priv, v6_m_spec->ip6src, - &v6_m_spec->psrc, true); + flow->m_ext.vlan_etype ^= cpu_to_be16(~0); + flow->m_ext.vlan_tci ^= cpu_to_be16(~0); + flow->m_ext.data[0] ^= cpu_to_be32(~0); + flow->m_ext.data[1] ^= cpu_to_be32(~0); } static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port, struct ethtool_rxnfc *nfc) { - u32 reg, ipv4_or_chain_id; - unsigned int queue_num; - int ret; - - bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location); - - ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM); - if (ret) - return ret; - - reg = core_readl(priv, CORE_ACT_POL_DATA0); - - ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL); - if (ret) - return ret; - - /* Extract the destination port */ - nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) & - DST_MAP_IB_MASK) - 1; - - /* There is no Port 6, so we compensate for that here */ - if (nfc->fs.ring_cookie >= 6) - nfc->fs.ring_cookie++; - nfc->fs.ring_cookie *= SF2_NUM_EGRESS_QUEUES; - - /* Extract the destination queue */ - queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK; - nfc->fs.ring_cookie += queue_num; - - /* Extract the L3_FRAMING or CHAIN_ID */ - reg = core_readl(priv, CORE_CFP_DATA_PORT(6)); + struct cfp_rule *rule; - /* With IPv6 rules this would contain a non-zero chain ID since - * we reserve entry 0 and it cannot be used. So if we read 0 here - * this means an IPv4 rule. 
- */ - ipv4_or_chain_id = (reg >> L3_FRAMING_SHIFT) & 0xff; - if (ipv4_or_chain_id == 0) - ret = bcm_sf2_cfp_ipv4_rule_get(priv, port, &nfc->fs); - else - ret = bcm_sf2_cfp_ipv6_rule_get(priv, port, &nfc->fs, - ipv4_or_chain_id); - if (ret) - return ret; - - /* Read last to avoid next entry clobbering the results during search - * operations - */ - reg = core_readl(priv, CORE_CFP_DATA_PORT(7)); - if (!(reg & 1 << port)) + rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location); + if (!rule) return -EINVAL; + memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs)); + bcm_sf2_invert_masks(&nfc->fs); /* Put the TCAM size here */ @@ -1302,3 +1121,51 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv) return 0; } + +void bcm_sf2_cfp_exit(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct cfp_rule *rule, *n; + + if (list_empty(&priv->cfp.rules_list)) + return; + + list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next) + bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location); +} + +int bcm_sf2_cfp_resume(struct dsa_switch *ds) +{ + struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds); + struct cfp_rule *rule; + int ret = 0; + u32 reg; + + if (list_empty(&priv->cfp.rules_list)) + return ret; + + reg = core_readl(priv, CORE_CFP_CTL_REG); + reg &= ~CFP_EN_MAP_MASK; + core_writel(priv, reg, CORE_CFP_CTL_REG); + + ret = bcm_sf2_cfp_rst(priv); + if (ret) + return ret; + + list_for_each_entry(rule, &priv->cfp.rules_list, next) { + ret = bcm_sf2_cfp_rule_remove(priv, rule->port, + rule->fs.location); + if (ret) { + dev_err(ds->dev, "failed to remove rule\n"); + return ret; + } + + ret = bcm_sf2_cfp_rule_insert(ds, rule->port, &rule->fs); + if (ret) { + dev_err(ds->dev, "failed to restore rule\n"); + return ret; + } + } + + return ret; +} diff --git a/drivers/net/dsa/microchip/Kconfig b/drivers/net/dsa/microchip/Kconfig index a8b8f59099ce..bea29fde9f3d 100644 --- a/drivers/net/dsa/microchip/Kconfig +++ b/drivers/net/dsa/microchip/Kconfig @@ -1,12 +1,16 @@ -menuconfig MICROCHIP_KSZ - tristate "Microchip KSZ series switch support" +config NET_DSA_MICROCHIP_KSZ_COMMON + tristate + +menuconfig NET_DSA_MICROCHIP_KSZ9477 + tristate "Microchip KSZ9477 series switch support" depends on NET_DSA - select NET_DSA_TAG_KSZ + select NET_DSA_TAG_KSZ9477 + select NET_DSA_MICROCHIP_KSZ_COMMON help - This driver adds support for Microchip KSZ switch chips. + This driver adds support for Microchip KSZ9477 switch chips. -config MICROCHIP_KSZ_SPI_DRIVER - tristate "KSZ series SPI connected switch driver" - depends on MICROCHIP_KSZ && SPI +config NET_DSA_MICROCHIP_KSZ9477_SPI + tristate "KSZ9477 series SPI connected switch driver" + depends on NET_DSA_MICROCHIP_KSZ9477 && SPI help Select to enable support for registering switches configured through SPI. 
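The bcm_sf2 CFP rework above stops reconstructing rules from TCAM reads and instead keeps a software copy of every ethtool_rx_flow_spec on priv->cfp.rules_list; that cache is what lets bcm_sf2_cfp_resume() replay the rules after a suspend has wiped the hardware, and what lets bcm_sf2_cfp_rule_get() answer ethtool queries from memory instead of decoding TCAM slices (hence the large unslice helpers deleted above). A generic sketch of that cache-and-replay pattern, with hypothetical names (cached_rule, the program callback) rather than the driver's own:

#include <linux/ethtool.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>

struct cached_rule {
	int port;
	struct ethtool_rx_flow_spec fs;
	struct list_head next;
};

/* Keep a software copy of a rule that was just programmed. */
static int cache_rule(struct list_head *rules, int port,
		      const struct ethtool_rx_flow_spec *fs)
{
	struct cached_rule *rule;

	rule = kzalloc(sizeof(*rule), GFP_KERNEL);
	if (!rule)
		return -ENOMEM;

	rule->port = port;
	memcpy(&rule->fs, fs, sizeof(*fs));
	list_add_tail(&rule->next, rules);
	return 0;
}

/* After resume, reprogram the hardware from the software copies. */
static int replay_rules(struct list_head *rules,
			int (*program)(int port,
				       struct ethtool_rx_flow_spec *fs))
{
	struct cached_rule *rule;
	int ret;

	list_for_each_entry(rule, rules, next) {
		ret = program(rule->port, &rule->fs);
		if (ret)
			return ret;
	}
	return 0;
}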
diff --git a/drivers/net/dsa/microchip/Makefile b/drivers/net/dsa/microchip/Makefile index ed335e29fae8..3142c18b8f57 100644 --- a/drivers/net/dsa/microchip/Makefile +++ b/drivers/net/dsa/microchip/Makefile @@ -1,2 +1,3 @@ -obj-$(CONFIG_MICROCHIP_KSZ) += ksz_common.o -obj-$(CONFIG_MICROCHIP_KSZ_SPI_DRIVER) += ksz_spi.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ_COMMON) += ksz_common.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477) += ksz9477.o +obj-$(CONFIG_NET_DSA_MICROCHIP_KSZ9477_SPI) += ksz9477_spi.o diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c new file mode 100644 index 000000000000..89ed059bb576 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -0,0 +1,1316 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ9477 switch driver main logic + * + * Copyright (C) 2017-2018 Microchip Technology Inc. + */ + +#include <linux/delay.h> +#include <linux/export.h> +#include <linux/gpio.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/platform_data/microchip-ksz.h> +#include <linux/phy.h> +#include <linux/etherdevice.h> +#include <linux/if_bridge.h> +#include <net/dsa.h> +#include <net/switchdev.h> + +#include "ksz_priv.h" +#include "ksz_common.h" +#include "ksz9477_reg.h" + +static const struct { + int index; + char string[ETH_GSTRING_LEN]; +} ksz9477_mib_names[TOTAL_SWITCH_COUNTER_NUM] = { + { 0x00, "rx_hi" }, + { 0x01, "rx_undersize" }, + { 0x02, "rx_fragments" }, + { 0x03, "rx_oversize" }, + { 0x04, "rx_jabbers" }, + { 0x05, "rx_symbol_err" }, + { 0x06, "rx_crc_err" }, + { 0x07, "rx_align_err" }, + { 0x08, "rx_mac_ctrl" }, + { 0x09, "rx_pause" }, + { 0x0A, "rx_bcast" }, + { 0x0B, "rx_mcast" }, + { 0x0C, "rx_ucast" }, + { 0x0D, "rx_64_or_less" }, + { 0x0E, "rx_65_127" }, + { 0x0F, "rx_128_255" }, + { 0x10, "rx_256_511" }, + { 0x11, "rx_512_1023" }, + { 0x12, "rx_1024_1522" }, + { 0x13, "rx_1523_2000" }, + { 0x14, "rx_2001" }, + { 0x15, "tx_hi" }, + { 0x16, "tx_late_col" }, + { 0x17, "tx_pause" }, + { 0x18, "tx_bcast" }, + { 0x19, "tx_mcast" }, + { 0x1A, "tx_ucast" }, + { 0x1B, "tx_deferred" }, + { 0x1C, "tx_total_col" }, + { 0x1D, "tx_exc_col" }, + { 0x1E, "tx_single_col" }, + { 0x1F, "tx_mult_col" }, + { 0x80, "rx_total" }, + { 0x81, "tx_total" }, + { 0x82, "rx_discards" }, + { 0x83, "tx_discards" }, +}; + +static void ksz9477_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set) +{ + u32 data; + + ksz_read32(dev, addr, &data); + if (set) + data |= bits; + else + data &= ~bits; + ksz_write32(dev, addr, data); +} + +static void ksz9477_port_cfg32(struct ksz_device *dev, int port, int offset, + u32 bits, bool set) +{ + u32 addr; + u32 data; + + addr = PORT_CTRL_ADDR(port, offset); + ksz_read32(dev, addr, &data); + + if (set) + data |= bits; + else + data &= ~bits; + + ksz_write32(dev, addr, data); +} + +static int ksz9477_wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, + int timeout) +{ + u8 data; + + do { + ksz_read8(dev, REG_SW_VLAN_CTRL, &data); + if (!(data & waiton)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (timeout <= 0) + return -ETIMEDOUT; + + return 0; +} + +static int ksz9477_get_vlan_table(struct ksz_device *dev, u16 vid, + u32 *vlan_table) +{ + int ret; + + mutex_lock(&dev->vlan_mutex); + + ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M); + ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START); + + /* wait to be cleared */ + ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read vlan table\n"); 
+ goto exit; + } + + ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]); + ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]); + ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]); + + ksz_write8(dev, REG_SW_VLAN_CTRL, 0); + +exit: + mutex_unlock(&dev->vlan_mutex); + + return ret; +} + +static int ksz9477_set_vlan_table(struct ksz_device *dev, u16 vid, + u32 *vlan_table) +{ + int ret; + + mutex_lock(&dev->vlan_mutex); + + ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]); + ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]); + ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]); + + ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M); + ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE); + + /* wait to be cleared */ + ret = ksz9477_wait_vlan_ctrl_ready(dev, VLAN_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to write vlan table\n"); + goto exit; + } + + ksz_write8(dev, REG_SW_VLAN_CTRL, 0); + + /* update vlan cache table */ + dev->vlan_cache[vid].table[0] = vlan_table[0]; + dev->vlan_cache[vid].table[1] = vlan_table[1]; + dev->vlan_cache[vid].table[2] = vlan_table[2]; + +exit: + mutex_unlock(&dev->vlan_mutex); + + return ret; +} + +static void ksz9477_read_table(struct ksz_device *dev, u32 *table) +{ + ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]); + ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]); + ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]); + ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]); +} + +static void ksz9477_write_table(struct ksz_device *dev, u32 *table) +{ + ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]); + ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]); + ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]); + ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]); +} + +static int ksz9477_wait_alu_ready(struct ksz_device *dev, u32 waiton, + int timeout) +{ + u32 data; + + do { + ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); + if (!(data & waiton)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (timeout <= 0) + return -ETIMEDOUT; + + return 0; +} + +static int ksz9477_wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, + int timeout) +{ + u32 data; + + do { + ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data); + if (!(data & waiton)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (timeout <= 0) + return -ETIMEDOUT; + + return 0; +} + +static int ksz9477_reset_switch(struct ksz_device *dev) +{ + u8 data8; + u16 data16; + u32 data32; + + /* reset switch */ + ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true); + + /* turn off SPI DO Edge select */ + ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); + data8 &= ~SPI_AUTO_EDGE_DETECTION; + ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); + + /* default configuration */ + ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); + data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING | + SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE; + ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + + /* disable interrupts */ + ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK); + ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F); + ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32); + + /* set broadcast storm protection 10% rate */ + ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16); + data16 &= ~BROADCAST_STORM_RATE; + data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100; + ksz_write16(dev, REG_SW_MAC_CTRL_2, data16); + + return 0; +} + +static enum dsa_tag_protocol ksz9477_get_tag_protocol(struct dsa_switch *ds, + int port) +{ + return DSA_TAG_PROTO_KSZ9477; +} 
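The ksz9477_wait_vlan_ctrl_ready(), ksz9477_wait_alu_ready() and ksz9477_wait_alu_sta_ready() helpers above all use the same poll-until-clear pattern: read a status register, succeed once the requested bits have dropped, sleep briefly between reads, and give up with -ETIMEDOUT after a bounded number of iterations. The user-space sketch below shows the idiom in isolation, slightly simplified; fake_read32() is a made-up stub standing in for the driver's ksz_read32() register access over SPI:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t fake_reg = 0x1;	/* bit 0 set: operation still busy */

/* Stub register read; pretend the hardware finishes after three polls. */
static void fake_read32(uint32_t *val)
{
	static int polls;

	if (++polls > 3)
		fake_reg = 0;
	*val = fake_reg;
}

/* Poll until none of the bits in 'waiton' are set or the budget runs out. */
static int wait_reg_ready(uint32_t waiton, int timeout)
{
	uint32_t data;

	do {
		fake_read32(&data);
		if (!(data & waiton))
			return 0;	/* bits cleared: operation completed */
		usleep(10);		/* the driver uses usleep_range(1, 10) */
	} while (timeout-- > 0);

	return -1;			/* the driver returns -ETIMEDOUT here */
}

int main(void)
{
	printf("wait: %d\n", wait_reg_ready(0x1, 1000));	/* prints 0 */
	return 0;
}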
+ +static int ksz9477_phy_read16(struct dsa_switch *ds, int addr, int reg) +{ + struct ksz_device *dev = ds->priv; + u16 val = 0xffff; + + /* No real PHY after this. Simulate the PHY. + * A fixed PHY can be setup in the device tree, but this function is + * still called for that port during initialization. + * For RGMII PHY there is no way to access it so the fixed PHY should + * be used. For SGMII PHY the supporting code will be added later. + */ + if (addr >= dev->phy_port_cnt) { + struct ksz_port *p = &dev->ports[addr]; + + switch (reg) { + case MII_BMCR: + val = 0x1140; + break; + case MII_BMSR: + val = 0x796d; + break; + case MII_PHYSID1: + val = 0x0022; + break; + case MII_PHYSID2: + val = 0x1631; + break; + case MII_ADVERTISE: + val = 0x05e1; + break; + case MII_LPA: + val = 0xc5e1; + break; + case MII_CTRL1000: + val = 0x0700; + break; + case MII_STAT1000: + if (p->phydev.speed == SPEED_1000) + val = 0x3800; + else + val = 0; + break; + } + } else { + ksz_pread16(dev, addr, 0x100 + (reg << 1), &val); + } + + return val; +} + +static int ksz9477_phy_write16(struct dsa_switch *ds, int addr, int reg, + u16 val) +{ + struct ksz_device *dev = ds->priv; + + /* No real PHY after this. */ + if (addr >= dev->phy_port_cnt) + return 0; + ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val); + + return 0; +} + +static void ksz9477_get_strings(struct dsa_switch *ds, int port, + u32 stringset, uint8_t *buf) +{ + int i; + + if (stringset != ETH_SS_STATS) + return; + + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + memcpy(buf + i * ETH_GSTRING_LEN, ksz9477_mib_names[i].string, + ETH_GSTRING_LEN); + } +} + +static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, + uint64_t *buf) +{ + struct ksz_device *dev = ds->priv; + int i; + u32 data; + int timeout; + + mutex_lock(&dev->stats_mutex); + + for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { + data = MIB_COUNTER_READ; + data |= ((ksz9477_mib_names[i].index & 0xFF) << + MIB_COUNTER_INDEX_S); + ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data); + + timeout = 1000; + do { + ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4, + &data); + usleep_range(1, 10); + if (!(data & MIB_COUNTER_READ)) + break; + } while (timeout-- > 0); + + /* failed to read MIB. get out of loop */ + if (!timeout) { + dev_dbg(dev->dev, "Failed to get MIB\n"); + break; + } + + /* count resets upon read */ + ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data); + + dev->mib_value[i] += (uint64_t)data; + buf[i] = dev->mib_value[i]; + } + + mutex_unlock(&dev->stats_mutex); +} + +static void ksz9477_cfg_port_member(struct ksz_device *dev, int port, + u8 member) +{ + ksz_pwrite32(dev, port, REG_PORT_VLAN_MEMBERSHIP__4, member); + dev->ports[port].member = member; +} + +static void ksz9477_port_stp_state_set(struct dsa_switch *ds, int port, + u8 state) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p = &dev->ports[port]; + u8 data; + int member = -1; + + ksz_pread8(dev, port, P_STP_CTRL, &data); + data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); + + switch (state) { + case BR_STATE_DISABLED: + data |= PORT_LEARN_DISABLE; + if (port != dev->cpu_port) + member = 0; + break; + case BR_STATE_LISTENING: + data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); + if (port != dev->cpu_port && + p->stp_state == BR_STATE_DISABLED) + member = dev->host_mask | p->vid_member; + break; + case BR_STATE_LEARNING: + data |= PORT_RX_ENABLE; + break; + case BR_STATE_FORWARDING: + data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); + + /* This function is also used internally. 
*/ + if (port == dev->cpu_port) + break; + + member = dev->host_mask | p->vid_member; + + /* Port is a member of a bridge. */ + if (dev->br_member & (1 << port)) { + dev->member |= (1 << port); + member = dev->member; + } + break; + case BR_STATE_BLOCKING: + data |= PORT_LEARN_DISABLE; + if (port != dev->cpu_port && + p->stp_state == BR_STATE_DISABLED) + member = dev->host_mask | p->vid_member; + break; + default: + dev_err(ds->dev, "invalid STP state: %d\n", state); + return; + } + + ksz_pwrite8(dev, port, P_STP_CTRL, data); + p->stp_state = state; + if (data & PORT_RX_ENABLE) + dev->rx_ports |= (1 << port); + else + dev->rx_ports &= ~(1 << port); + if (data & PORT_TX_ENABLE) + dev->tx_ports |= (1 << port); + else + dev->tx_ports &= ~(1 << port); + + /* Port membership may share register with STP state. */ + if (member >= 0 && member != p->member) + ksz9477_cfg_port_member(dev, port, (u8)member); + + /* Check if forwarding needs to be updated. */ + if (state != BR_STATE_FORWARDING) { + if (dev->br_member & (1 << port)) + dev->member &= ~(1 << port); + } + + /* When topology has changed the function ksz_update_port_member + * should be called to modify port forwarding behavior. However + * as the offload_fwd_mark indication cannot be reported here + * the switch forwarding function is not enabled. + */ +} + +static void ksz9477_flush_dyn_mac_table(struct ksz_device *dev, int port) +{ + u8 data; + + ksz_read8(dev, REG_SW_LUE_CTRL_2, &data); + data &= ~(SW_FLUSH_OPTION_M << SW_FLUSH_OPTION_S); + data |= (SW_FLUSH_OPTION_DYN_MAC << SW_FLUSH_OPTION_S); + ksz_write8(dev, REG_SW_LUE_CTRL_2, data); + if (port < dev->mib_port_cnt) { + /* flush individual port */ + ksz_pread8(dev, port, P_STP_CTRL, &data); + if (!(data & PORT_LEARN_DISABLE)) + ksz_pwrite8(dev, port, P_STP_CTRL, + data | PORT_LEARN_DISABLE); + ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_DYN_MAC_TABLE, true); + ksz_pwrite8(dev, port, P_STP_CTRL, data); + } else { + /* flush all */ + ksz_cfg(dev, S_FLUSH_TABLE_CTRL, SW_FLUSH_STP_TABLE, true); + } +} + +static int ksz9477_port_vlan_filtering(struct dsa_switch *ds, int port, + bool flag) +{ + struct ksz_device *dev = ds->priv; + + if (flag) { + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, + PORT_VLAN_LOOKUP_VID_0, true); + ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true); + } else { + ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false); + ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, + PORT_VLAN_LOOKUP_VID_0, false); + } + + return 0; +} + +static void ksz9477_port_vlan_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ksz_device *dev = ds->priv; + u32 vlan_table[3]; + u16 vid; + bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + if (ksz9477_get_vlan_table(dev, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to get vlan table\n"); + return; + } + + vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M); + if (untagged) + vlan_table[1] |= BIT(port); + else + vlan_table[1] &= ~BIT(port); + vlan_table[1] &= ~(BIT(dev->cpu_port)); + + vlan_table[2] |= BIT(port) | BIT(dev->cpu_port); + + if (ksz9477_set_vlan_table(dev, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to set vlan table\n"); + return; + } + + /* change PVID */ + if (vlan->flags & BRIDGE_VLAN_INFO_PVID) + ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid); + } +} + +static int ksz9477_port_vlan_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) +{ + struct ksz_device *dev = ds->priv; + 
bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + u32 vlan_table[3]; + u16 vid; + u16 pvid; + + ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid); + pvid = pvid & 0xFFF; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + if (ksz9477_get_vlan_table(dev, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to get vlan table\n"); + return -ETIMEDOUT; + } + + vlan_table[2] &= ~BIT(port); + + if (pvid == vid) + pvid = 1; + + if (untagged) + vlan_table[1] &= ~BIT(port); + + if (ksz9477_set_vlan_table(dev, vid, vlan_table)) { + dev_dbg(dev->dev, "Failed to set vlan table\n"); + return -ETIMEDOUT; + } + } + + ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid); + + return 0; +} + +static int ksz9477_port_fdb_add(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct ksz_device *dev = ds->priv; + u32 alu_table[4]; + u32 data; + int ret = 0; + + mutex_lock(&dev->alu_mutex); + + /* find any entry with mac & vid */ + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); + ksz_write32(dev, REG_SW_ALU_INDEX_0, data); + + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); + ksz_write32(dev, REG_SW_ALU_INDEX_1, data); + + /* start read operation */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); + + /* wait to be finished */ + ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read ALU\n"); + goto exit; + } + + /* read ALU entry */ + ksz9477_read_table(dev, alu_table); + + /* update ALU entry */ + alu_table[0] = ALU_V_STATIC_VALID; + alu_table[1] |= BIT(port); + if (vid) + alu_table[1] |= ALU_V_USE_FID; + alu_table[2] = (vid << ALU_V_FID_S); + alu_table[2] |= ((addr[0] << 8) | addr[1]); + alu_table[3] = ((addr[2] << 24) | (addr[3] << 16)); + alu_table[3] |= ((addr[4] << 8) | addr[5]); + + ksz9477_write_table(dev, alu_table); + + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); + + /* wait to be finished */ + ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to write ALU\n"); + +exit: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz9477_port_fdb_del(struct dsa_switch *ds, int port, + const unsigned char *addr, u16 vid) +{ + struct ksz_device *dev = ds->priv; + u32 alu_table[4]; + u32 data; + int ret = 0; + + mutex_lock(&dev->alu_mutex); + + /* read any entry with mac & vid */ + data = vid << ALU_FID_INDEX_S; + data |= ((addr[0] << 8) | addr[1]); + ksz_write32(dev, REG_SW_ALU_INDEX_0, data); + + data = ((addr[2] << 24) | (addr[3] << 16)); + data |= ((addr[4] << 8) | addr[5]); + ksz_write32(dev, REG_SW_ALU_INDEX_1, data); + + /* start read operation */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); + + /* wait to be finished */ + ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read ALU\n"); + goto exit; + } + + ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]); + if (alu_table[0] & ALU_V_STATIC_VALID) { + ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]); + ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]); + ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]); + + /* clear forwarding port */ + alu_table[2] &= ~BIT(port); + + /* if there is no port to forward, clear table */ + if ((alu_table[2] & ALU_V_PORT_MAP) == 0) { + alu_table[0] = 0; + alu_table[1] = 0; + alu_table[2] = 0; + alu_table[3] = 0; + } + } else { + alu_table[0] = 0; + alu_table[1] = 0; + alu_table[2] = 0; + alu_table[3] = 0; + } + + 
ksz9477_write_table(dev, alu_table); + + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); + + /* wait to be finished */ + ret = ksz9477_wait_alu_ready(dev, ALU_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to write ALU\n"); + +exit: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static void ksz9477_convert_alu(struct alu_struct *alu, u32 *alu_table) +{ + alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID); + alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER); + alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER); + alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) & + ALU_V_PRIO_AGE_CNT_M; + alu->mstp = alu_table[0] & ALU_V_MSTP_M; + + alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE); + alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID); + alu->port_forward = alu_table[1] & ALU_V_PORT_MAP; + + alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M; + + alu->mac[0] = (alu_table[2] >> 8) & 0xFF; + alu->mac[1] = alu_table[2] & 0xFF; + alu->mac[2] = (alu_table[3] >> 24) & 0xFF; + alu->mac[3] = (alu_table[3] >> 16) & 0xFF; + alu->mac[4] = (alu_table[3] >> 8) & 0xFF; + alu->mac[5] = alu_table[3] & 0xFF; +} + +static int ksz9477_port_fdb_dump(struct dsa_switch *ds, int port, + dsa_fdb_dump_cb_t *cb, void *data) +{ + struct ksz_device *dev = ds->priv; + int ret = 0; + u32 ksz_data; + u32 alu_table[4]; + struct alu_struct alu; + int timeout; + + mutex_lock(&dev->alu_mutex); + + /* start ALU search */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH); + + do { + timeout = 1000; + do { + ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data); + if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START)) + break; + usleep_range(1, 10); + } while (timeout-- > 0); + + if (!timeout) { + dev_dbg(dev->dev, "Failed to search ALU\n"); + ret = -ETIMEDOUT; + goto exit; + } + + /* read ALU table */ + ksz9477_read_table(dev, alu_table); + + ksz9477_convert_alu(&alu, alu_table); + + if (alu.port_forward & BIT(port)) { + ret = cb(alu.mac, alu.fid, alu.is_static, data); + if (ret) + goto exit; + } + } while (ksz_data & ALU_START); + +exit: + + /* stop ALU search */ + ksz_write32(dev, REG_SW_ALU_CTRL__4, 0); + + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static void ksz9477_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ksz_device *dev = ds->priv; + u32 static_table[4]; + u32 data; + int index; + u32 mac_hi, mac_lo; + + mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]); + mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16)); + mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]); + + mutex_lock(&dev->alu_mutex); + + for (index = 0; index < dev->num_statics; index++) { + /* find empty slot first */ + data = (index << ALU_STAT_INDEX_S) | + ALU_STAT_READ | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) { + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + goto exit; + } + + /* read ALU static table */ + ksz9477_read_table(dev, static_table); + + if (static_table[0] & ALU_V_STATIC_VALID) { + /* check this has same vid & mac address */ + if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) && + ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) && + static_table[3] == mac_lo) { + /* found matching one */ + break; + } + } else { + /* found empty one */ + break; + } + } + + /* no available entry */ + if (index == dev->num_statics) + goto exit; + + /* add entry */ + static_table[0] = ALU_V_STATIC_VALID; 
+ static_table[1] |= BIT(port); + if (mdb->vid) + static_table[1] |= ALU_V_USE_FID; + static_table[2] = (mdb->vid << ALU_V_FID_S); + static_table[2] |= mac_hi; + static_table[3] = mac_lo; + + ksz9477_write_table(dev, static_table); + + data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + if (ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + +exit: + mutex_unlock(&dev->alu_mutex); +} + +static int ksz9477_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) +{ + struct ksz_device *dev = ds->priv; + u32 static_table[4]; + u32 data; + int index; + int ret = 0; + u32 mac_hi, mac_lo; + + mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]); + mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16)); + mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]); + + mutex_lock(&dev->alu_mutex); + + for (index = 0; index < dev->num_statics; index++) { + /* find empty slot first */ + data = (index << ALU_STAT_INDEX_S) | + ALU_STAT_READ | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000); + if (ret < 0) { + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + goto exit; + } + + /* read ALU static table */ + ksz9477_read_table(dev, static_table); + + if (static_table[0] & ALU_V_STATIC_VALID) { + /* check this has same vid & mac address */ + + if (((static_table[2] >> ALU_V_FID_S) == mdb->vid) && + ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) && + static_table[3] == mac_lo) { + /* found matching one */ + break; + } + } + } + + /* no available entry */ + if (index == dev->num_statics) + goto exit; + + /* clear port */ + static_table[1] &= ~BIT(port); + + if ((static_table[1] & ALU_V_PORT_MAP) == 0) { + /* delete entry */ + static_table[0] = 0; + static_table[1] = 0; + static_table[2] = 0; + static_table[3] = 0; + } + + ksz9477_write_table(dev, static_table); + + data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START; + ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); + + /* wait to be finished */ + ret = ksz9477_wait_alu_sta_ready(dev, ALU_STAT_START, 1000); + if (ret < 0) + dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + +exit: + mutex_unlock(&dev->alu_mutex); + + return ret; +} + +static int ksz9477_port_mirror_add(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror, + bool ingress) +{ + struct ksz_device *dev = ds->priv; + + if (ingress) + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); + else + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); + + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); + + /* configure mirror port */ + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, true); + + ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + + return 0; +} + +static void ksz9477_port_mirror_del(struct dsa_switch *ds, int port, + struct dsa_mall_mirror_tc_entry *mirror) +{ + struct ksz_device *dev = ds->priv; + u8 data; + + if (mirror->ingress) + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); + else + ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); + + ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + + if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) + ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, + PORT_MIRROR_SNIFFER, false); +} + +static void ksz9477_port_setup(struct ksz_device *dev, int 
port, bool cpu_port) +{ + u8 data8; + u8 member; + u16 data16; + struct ksz_port *p = &dev->ports[port]; + + /* enable tag tail for host port */ + if (cpu_port) + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE, + true); + + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false); + + /* set back pressure */ + ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true); + + /* enable broadcast storm limit */ + ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); + + /* disable DiffServ priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false); + + /* replace priority */ + ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING, + false); + ksz9477_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4, + MTI_PVID_REPLACE, false); + + /* enable 802.1p priority */ + ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true); + + if (port < dev->phy_port_cnt) { + /* do not force flow control */ + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, + PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, + false); + + } else { + /* force flow control */ + ksz_port_cfg(dev, port, REG_PORT_CTRL_0, + PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, + true); + + /* configure MAC to 1G & RGMII mode */ + ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8); + data8 &= ~PORT_MII_NOT_1GBIT; + data8 &= ~PORT_MII_SEL_M; + switch (dev->interface) { + case PHY_INTERFACE_MODE_MII: + data8 |= PORT_MII_NOT_1GBIT; + data8 |= PORT_MII_SEL; + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_RMII: + data8 |= PORT_MII_NOT_1GBIT; + data8 |= PORT_RMII_SEL; + p->phydev.speed = SPEED_100; + break; + case PHY_INTERFACE_MODE_GMII: + data8 |= PORT_GMII_SEL; + p->phydev.speed = SPEED_1000; + break; + default: + data8 &= ~PORT_RGMII_ID_IG_ENABLE; + data8 &= ~PORT_RGMII_ID_EG_ENABLE; + if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || + dev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + data8 |= PORT_RGMII_ID_IG_ENABLE; + if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || + dev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + data8 |= PORT_RGMII_ID_EG_ENABLE; + data8 |= PORT_RGMII_SEL; + p->phydev.speed = SPEED_1000; + break; + } + ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); + p->phydev.duplex = 1; + } + if (cpu_port) { + member = dev->port_mask; + dev->on_ports = dev->host_mask; + dev->live_ports = dev->host_mask; + } else { + member = dev->host_mask | p->vid_member; + dev->on_ports |= (1 << port); + + /* Link was detected before port is enabled. */ + if (p->phydev.link) + dev->live_ports |= (1 << port); + } + ksz9477_cfg_port_member(dev, port, member); + + /* clear pending interrupts */ + if (port < dev->phy_port_cnt) + ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16); +} + +static void ksz9477_config_cpu_port(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + struct ksz_port *p; + int i; + + ds->num_ports = dev->port_cnt; + + for (i = 0; i < dev->port_cnt; i++) { + if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) { + dev->cpu_port = i; + dev->host_mask = (1 << dev->cpu_port); + dev->port_mask |= dev->host_mask; + + /* enable cpu port */ + ksz9477_port_setup(dev, i, true); + p = &dev->ports[dev->cpu_port]; + p->vid_member = dev->port_mask; + p->on = 1; + } + } + + dev->member = dev->host_mask; + + for (i = 0; i < dev->mib_port_cnt; i++) { + if (i == dev->cpu_port) + continue; + p = &dev->ports[i]; + + /* Initialize to non-zero so that ksz_cfg_port_member() will + * be called. 
+ */ + p->vid_member = (1 << i); + p->member = dev->port_mask; + ksz9477_port_stp_state_set(ds, i, BR_STATE_DISABLED); + p->on = 1; + if (i < dev->phy_port_cnt) + p->phy = 1; + if (dev->chip_id == 0x00947700 && i == 6) { + p->sgmii = 1; + + /* SGMII PHY detection code is not implemented yet. */ + p->phy = 0; + } + } +} + +static int ksz9477_setup(struct dsa_switch *ds) +{ + struct ksz_device *dev = ds->priv; + int ret = 0; + + dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), + dev->num_vlans, GFP_KERNEL); + if (!dev->vlan_cache) + return -ENOMEM; + + ret = ksz9477_reset_switch(dev); + if (ret) { + dev_err(ds->dev, "failed to reset switch\n"); + return ret; + } + + /* Required for port partitioning. */ + ksz9477_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, + true); + + /* accept packet up to 2000bytes */ + ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true); + + ksz9477_config_cpu_port(ds); + + ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true); + + /* queue based egress rate limit */ + ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true); + + /* start switch */ + ksz_cfg(dev, REG_SW_OPERATION, SW_START, true); + + return 0; +} + +static const struct dsa_switch_ops ksz9477_switch_ops = { + .get_tag_protocol = ksz9477_get_tag_protocol, + .setup = ksz9477_setup, + .phy_read = ksz9477_phy_read16, + .phy_write = ksz9477_phy_write16, + .port_enable = ksz_enable_port, + .port_disable = ksz_disable_port, + .get_strings = ksz9477_get_strings, + .get_ethtool_stats = ksz_get_ethtool_stats, + .get_sset_count = ksz_sset_count, + .port_bridge_join = ksz_port_bridge_join, + .port_bridge_leave = ksz_port_bridge_leave, + .port_stp_state_set = ksz9477_port_stp_state_set, + .port_fast_age = ksz_port_fast_age, + .port_vlan_filtering = ksz9477_port_vlan_filtering, + .port_vlan_prepare = ksz_port_vlan_prepare, + .port_vlan_add = ksz9477_port_vlan_add, + .port_vlan_del = ksz9477_port_vlan_del, + .port_fdb_dump = ksz9477_port_fdb_dump, + .port_fdb_add = ksz9477_port_fdb_add, + .port_fdb_del = ksz9477_port_fdb_del, + .port_mdb_prepare = ksz_port_mdb_prepare, + .port_mdb_add = ksz9477_port_mdb_add, + .port_mdb_del = ksz9477_port_mdb_del, + .port_mirror_add = ksz9477_port_mirror_add, + .port_mirror_del = ksz9477_port_mirror_del, +}; + +static u32 ksz9477_get_port_addr(int port, int offset) +{ + return PORT_CTRL_ADDR(port, offset); +} + +static int ksz9477_switch_detect(struct ksz_device *dev) +{ + u8 data8; + u32 id32; + int ret; + + /* turn off SPI DO Edge select */ + ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); + if (ret) + return ret; + + data8 &= ~SPI_AUTO_EDGE_DETECTION; + ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); + if (ret) + return ret; + + /* read chip id */ + ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32); + if (ret) + return ret; + + /* Number of ports can be reduced depending on chip. 
*/ + dev->mib_port_cnt = TOTAL_PORT_NUM; + dev->phy_port_cnt = 5; + + dev->chip_id = id32; + + return 0; +} + +struct ksz_chip_data { + u32 chip_id; + const char *dev_name; + int num_vlans; + int num_alus; + int num_statics; + int cpu_ports; + int port_cnt; +}; + +static const struct ksz_chip_data ksz9477_switch_chips[] = { + { + .chip_id = 0x00947700, + .dev_name = "KSZ9477", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + }, + { + .chip_id = 0x00989700, + .dev_name = "KSZ9897", + .num_vlans = 4096, + .num_alus = 4096, + .num_statics = 16, + .cpu_ports = 0x7F, /* can be configured as cpu port */ + .port_cnt = 7, /* total physical port count */ + }, +}; + +static int ksz9477_switch_init(struct ksz_device *dev) +{ + int i; + + dev->ds->ops = &ksz9477_switch_ops; + + for (i = 0; i < ARRAY_SIZE(ksz9477_switch_chips); i++) { + const struct ksz_chip_data *chip = &ksz9477_switch_chips[i]; + + if (dev->chip_id == chip->chip_id) { + dev->name = chip->dev_name; + dev->num_vlans = chip->num_vlans; + dev->num_alus = chip->num_alus; + dev->num_statics = chip->num_statics; + dev->port_cnt = chip->port_cnt; + dev->cpu_ports = chip->cpu_ports; + + break; + } + } + + /* no switch found */ + if (!dev->port_cnt) + return -ENODEV; + + dev->port_mask = (1 << dev->port_cnt) - 1; + + dev->reg_mib_cnt = SWITCH_COUNTER_NUM; + dev->mib_cnt = TOTAL_SWITCH_COUNTER_NUM; + + i = dev->mib_port_cnt; + dev->ports = devm_kzalloc(dev->dev, sizeof(struct ksz_port) * i, + GFP_KERNEL); + if (!dev->ports) + return -ENOMEM; + for (i = 0; i < dev->mib_port_cnt; i++) { + dev->ports[i].mib.counters = + devm_kzalloc(dev->dev, + sizeof(u64) * + (TOTAL_SWITCH_COUNTER_NUM + 1), + GFP_KERNEL); + if (!dev->ports[i].mib.counters) + return -ENOMEM; + } + dev->interface = PHY_INTERFACE_MODE_RGMII_TXID; + + return 0; +} + +static void ksz9477_switch_exit(struct ksz_device *dev) +{ + ksz9477_reset_switch(dev); +} + +static const struct ksz_dev_ops ksz9477_dev_ops = { + .get_port_addr = ksz9477_get_port_addr, + .cfg_port_member = ksz9477_cfg_port_member, + .flush_dyn_mac_table = ksz9477_flush_dyn_mac_table, + .port_setup = ksz9477_port_setup, + .shutdown = ksz9477_reset_switch, + .detect = ksz9477_switch_detect, + .init = ksz9477_switch_init, + .exit = ksz9477_switch_exit, +}; + +int ksz9477_switch_register(struct ksz_device *dev) +{ + return ksz_switch_register(dev, &ksz9477_dev_ops); +} +EXPORT_SYMBOL(ksz9477_switch_register); + +MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch DSA Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz_9477_reg.h b/drivers/net/dsa/microchip/ksz9477_reg.h index 6aa6752035a1..2938e892b631 100644 --- a/drivers/net/dsa/microchip/ksz_9477_reg.h +++ b/drivers/net/dsa/microchip/ksz9477_reg.h @@ -1,19 +1,8 @@ -/* - * Microchip KSZ9477 register definitions - * - * Copyright (C) 2017 +/* SPDX-License-Identifier: GPL-2.0 * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. + * Microchip KSZ9477 register definitions * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (C) 2017-2018 Microchip Technology Inc. */ #ifndef __KSZ9477_REGS_H diff --git a/drivers/net/dsa/microchip/ksz9477_spi.c b/drivers/net/dsa/microchip/ksz9477_spi.c new file mode 100644 index 000000000000..d757ba151cb1 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz9477_spi.c @@ -0,0 +1,177 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Microchip KSZ9477 series register access through SPI + * + * Copyright (C) 2017-2018 Microchip Technology Inc. + */ + +#include <asm/unaligned.h> + +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/spi/spi.h> + +#include "ksz_priv.h" +#include "ksz_spi.h" + +/* SPI frame opcodes */ +#define KS_SPIOP_RD 3 +#define KS_SPIOP_WR 2 + +#define SPI_ADDR_SHIFT 24 +#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1) +#define SPI_TURNAROUND_SHIFT 5 + +/* Enough to read all switch port registers. */ +#define SPI_TX_BUF_LEN 0x100 + +static int ksz9477_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val, + unsigned int len) +{ + u32 txbuf; + int ret; + + txbuf = reg & SPI_ADDR_MASK; + txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT; + txbuf <<= SPI_TURNAROUND_SHIFT; + txbuf = cpu_to_be32(txbuf); + + ret = spi_write_then_read(spi, &txbuf, 4, val, len); + return ret; +} + +static int ksz9477_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val, + unsigned int len) +{ + u32 *txbuf = (u32 *)val; + + *txbuf = reg & SPI_ADDR_MASK; + *txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT); + *txbuf <<= SPI_TURNAROUND_SHIFT; + *txbuf = cpu_to_be32(*txbuf); + + return spi_write(spi, txbuf, 4 + len); +} + +static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data, + unsigned int len) +{ + struct spi_device *spi = dev->priv; + + return ksz9477_spi_read_reg(spi, reg, data, len); +} + +static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data, + unsigned int len) +{ + struct spi_device *spi = dev->priv; + + if (len > SPI_TX_BUF_LEN) + len = SPI_TX_BUF_LEN; + memcpy(&dev->txbuf[4], data, len); + return ksz9477_spi_write_reg(spi, reg, dev->txbuf, len); +} + +static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret; + + *val = 0; + ret = ksz_spi_read(dev, reg, (u8 *)val, 3); + if (!ret) { + *val = be32_to_cpu(*val); + /* convert to 24bit */ + *val >>= 8; + } + + return ret; +} + +static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value) +{ + /* make it to big endian 24bit from MSB */ + value <<= 8; + value = cpu_to_be32(value); + return ksz_spi_write(dev, reg, &value, 3); +} + +static const struct ksz_io_ops ksz9477_spi_ops = { + .read8 = ksz_spi_read8, + .read16 = ksz_spi_read16, + .read24 = ksz_spi_read24, + .read32 = ksz_spi_read32, + .write8 = ksz_spi_write8, + .write16 = ksz_spi_write16, + .write24 = ksz_spi_write24, + .write32 = ksz_spi_write32, + .get = ksz_spi_get, + .set = ksz_spi_set, +}; + +static int ksz9477_spi_probe(struct spi_device *spi) +{ + struct ksz_device *dev; + int ret; + + dev = ksz_switch_alloc(&spi->dev, &ksz9477_spi_ops, spi); + if (!dev) + return -ENOMEM; + + if (spi->dev.platform_data) + dev->pdata = spi->dev.platform_data; + + dev->txbuf = devm_kzalloc(dev->dev, 4 + SPI_TX_BUF_LEN, GFP_KERNEL); + + ret = ksz9477_switch_register(dev); 
+ + /* Main DSA driver may not be started yet. */ + if (ret) + return ret; + + spi_set_drvdata(spi, dev); + + return 0; +} + +static int ksz9477_spi_remove(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev) + ksz_switch_remove(dev); + + return 0; +} + +static void ksz9477_spi_shutdown(struct spi_device *spi) +{ + struct ksz_device *dev = spi_get_drvdata(spi); + + if (dev && dev->dev_ops->shutdown) + dev->dev_ops->shutdown(dev); +} + +static const struct of_device_id ksz9477_dt_ids[] = { + { .compatible = "microchip,ksz9477" }, + { .compatible = "microchip,ksz9897" }, + {}, +}; +MODULE_DEVICE_TABLE(of, ksz9477_dt_ids); + +static struct spi_driver ksz9477_spi_driver = { + .driver = { + .name = "ksz9477-switch", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ksz9477_dt_ids), + }, + .probe = ksz9477_spi_probe, + .remove = ksz9477_spi_remove, + .shutdown = ksz9477_spi_shutdown, +}; + +module_spi_driver(ksz9477_spi_driver); + +MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>"); +MODULE_DESCRIPTION("Microchip KSZ9477 Series Switch SPI access Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 86b6464b4525..3b12e2dcff31 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -1,1145 +1,266 @@ +// SPDX-License-Identifier: GPL-2.0 /* * Microchip switch driver main logic * - * Copyright (C) 2017 - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (C) 2017-2018 Microchip Technology Inc. 
*/ #include <linux/delay.h> #include <linux/export.h> #include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_data/microchip-ksz.h> #include <linux/phy.h> #include <linux/etherdevice.h> #include <linux/if_bridge.h> +#include <linux/of_gpio.h> +#include <linux/of_net.h> #include <net/dsa.h> #include <net/switchdev.h> #include "ksz_priv.h" -static const struct { - int index; - char string[ETH_GSTRING_LEN]; -} mib_names[TOTAL_SWITCH_COUNTER_NUM] = { - { 0x00, "rx_hi" }, - { 0x01, "rx_undersize" }, - { 0x02, "rx_fragments" }, - { 0x03, "rx_oversize" }, - { 0x04, "rx_jabbers" }, - { 0x05, "rx_symbol_err" }, - { 0x06, "rx_crc_err" }, - { 0x07, "rx_align_err" }, - { 0x08, "rx_mac_ctrl" }, - { 0x09, "rx_pause" }, - { 0x0A, "rx_bcast" }, - { 0x0B, "rx_mcast" }, - { 0x0C, "rx_ucast" }, - { 0x0D, "rx_64_or_less" }, - { 0x0E, "rx_65_127" }, - { 0x0F, "rx_128_255" }, - { 0x10, "rx_256_511" }, - { 0x11, "rx_512_1023" }, - { 0x12, "rx_1024_1522" }, - { 0x13, "rx_1523_2000" }, - { 0x14, "rx_2001" }, - { 0x15, "tx_hi" }, - { 0x16, "tx_late_col" }, - { 0x17, "tx_pause" }, - { 0x18, "tx_bcast" }, - { 0x19, "tx_mcast" }, - { 0x1A, "tx_ucast" }, - { 0x1B, "tx_deferred" }, - { 0x1C, "tx_total_col" }, - { 0x1D, "tx_exc_col" }, - { 0x1E, "tx_single_col" }, - { 0x1F, "tx_mult_col" }, - { 0x80, "rx_total" }, - { 0x81, "tx_total" }, - { 0x82, "rx_discards" }, - { 0x83, "tx_discards" }, -}; - -static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) -{ - u8 data; - - ksz_read8(dev, addr, &data); - if (set) - data |= bits; - else - data &= ~bits; - ksz_write8(dev, addr, data); -} - -static void ksz_cfg32(struct ksz_device *dev, u32 addr, u32 bits, bool set) -{ - u32 data; - - ksz_read32(dev, addr, &data); - if (set) - data |= bits; - else - data &= ~bits; - ksz_write32(dev, addr, data); -} - -static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, - bool set) -{ - u32 addr; - u8 data; - - addr = PORT_CTRL_ADDR(port, offset); - ksz_read8(dev, addr, &data); - - if (set) - data |= bits; - else - data &= ~bits; - - ksz_write8(dev, addr, data); -} - -static void ksz_port_cfg32(struct ksz_device *dev, int port, int offset, - u32 bits, bool set) -{ - u32 addr; - u32 data; - - addr = PORT_CTRL_ADDR(port, offset); - ksz_read32(dev, addr, &data); - - if (set) - data |= bits; - else - data &= ~bits; - - ksz_write32(dev, addr, data); -} - -static int wait_vlan_ctrl_ready(struct ksz_device *dev, u32 waiton, int timeout) -{ - u8 data; - - do { - ksz_read8(dev, REG_SW_VLAN_CTRL, &data); - if (!(data & waiton)) - break; - usleep_range(1, 10); - } while (timeout-- > 0); - - if (timeout <= 0) - return -ETIMEDOUT; - - return 0; -} - -static int get_vlan_table(struct dsa_switch *ds, u16 vid, u32 *vlan_table) -{ - struct ksz_device *dev = ds->priv; - int ret; - - mutex_lock(&dev->vlan_mutex); - - ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M); - ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_READ | VLAN_START); - - /* wait to be cleared */ - ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000); - if (ret < 0) { - dev_dbg(dev->dev, "Failed to read vlan table\n"); - goto exit; - } - - ksz_read32(dev, REG_SW_VLAN_ENTRY__4, &vlan_table[0]); - ksz_read32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, &vlan_table[1]); - ksz_read32(dev, REG_SW_VLAN_ENTRY_PORTS__4, &vlan_table[2]); - - ksz_write8(dev, REG_SW_VLAN_CTRL, 0); - -exit: - mutex_unlock(&dev->vlan_mutex); - - return ret; -} - -static int set_vlan_table(struct dsa_switch *ds, 
u16 vid, u32 *vlan_table) -{ - struct ksz_device *dev = ds->priv; - int ret; - - mutex_lock(&dev->vlan_mutex); - - ksz_write32(dev, REG_SW_VLAN_ENTRY__4, vlan_table[0]); - ksz_write32(dev, REG_SW_VLAN_ENTRY_UNTAG__4, vlan_table[1]); - ksz_write32(dev, REG_SW_VLAN_ENTRY_PORTS__4, vlan_table[2]); - - ksz_write16(dev, REG_SW_VLAN_ENTRY_INDEX__2, vid & VLAN_INDEX_M); - ksz_write8(dev, REG_SW_VLAN_CTRL, VLAN_START | VLAN_WRITE); - - /* wait to be cleared */ - ret = wait_vlan_ctrl_ready(dev, VLAN_START, 1000); - if (ret < 0) { - dev_dbg(dev->dev, "Failed to write vlan table\n"); - goto exit; - } - - ksz_write8(dev, REG_SW_VLAN_CTRL, 0); - - /* update vlan cache table */ - dev->vlan_cache[vid].table[0] = vlan_table[0]; - dev->vlan_cache[vid].table[1] = vlan_table[1]; - dev->vlan_cache[vid].table[2] = vlan_table[2]; - -exit: - mutex_unlock(&dev->vlan_mutex); - - return ret; -} - -static void read_table(struct dsa_switch *ds, u32 *table) -{ - struct ksz_device *dev = ds->priv; - - ksz_read32(dev, REG_SW_ALU_VAL_A, &table[0]); - ksz_read32(dev, REG_SW_ALU_VAL_B, &table[1]); - ksz_read32(dev, REG_SW_ALU_VAL_C, &table[2]); - ksz_read32(dev, REG_SW_ALU_VAL_D, &table[3]); -} - -static void write_table(struct dsa_switch *ds, u32 *table) -{ - struct ksz_device *dev = ds->priv; - - ksz_write32(dev, REG_SW_ALU_VAL_A, table[0]); - ksz_write32(dev, REG_SW_ALU_VAL_B, table[1]); - ksz_write32(dev, REG_SW_ALU_VAL_C, table[2]); - ksz_write32(dev, REG_SW_ALU_VAL_D, table[3]); -} - -static int wait_alu_ready(struct ksz_device *dev, u32 waiton, int timeout) -{ - u32 data; - - do { - ksz_read32(dev, REG_SW_ALU_CTRL__4, &data); - if (!(data & waiton)) - break; - usleep_range(1, 10); - } while (timeout-- > 0); - - if (timeout <= 0) - return -ETIMEDOUT; - - return 0; -} - -static int wait_alu_sta_ready(struct ksz_device *dev, u32 waiton, int timeout) -{ - u32 data; - - do { - ksz_read32(dev, REG_SW_ALU_STAT_CTRL__4, &data); - if (!(data & waiton)) - break; - usleep_range(1, 10); - } while (timeout-- > 0); - - if (timeout <= 0) - return -ETIMEDOUT; - - return 0; -} - -static int ksz_reset_switch(struct dsa_switch *ds) +void ksz_update_port_member(struct ksz_device *dev, int port) { - struct ksz_device *dev = ds->priv; - u8 data8; - u16 data16; - u32 data32; - - /* reset switch */ - ksz_cfg(dev, REG_SW_OPERATION, SW_RESET, true); - - /* turn off SPI DO Edge select */ - ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); - data8 &= ~SPI_AUTO_EDGE_DETECTION; - ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); - - /* default configuration */ - ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); - data8 = SW_AGING_ENABLE | SW_LINK_AUTO_AGING | - SW_SRC_ADDR_FILTER | SW_FLUSH_STP_TABLE | SW_FLUSH_MSTP_TABLE; - ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); - - /* disable interrupts */ - ksz_write32(dev, REG_SW_INT_MASK__4, SWITCH_INT_MASK); - ksz_write32(dev, REG_SW_PORT_INT_MASK__4, 0x7F); - ksz_read32(dev, REG_SW_PORT_INT_STATUS__4, &data32); - - /* set broadcast storm protection 10% rate */ - ksz_read16(dev, REG_SW_MAC_CTRL_2, &data16); - data16 &= ~BROADCAST_STORM_RATE; - data16 |= (BROADCAST_STORM_VALUE * BROADCAST_STORM_PROT_RATE) / 100; - ksz_write16(dev, REG_SW_MAC_CTRL_2, data16); - - return 0; -} - -static void port_setup(struct ksz_device *dev, int port, bool cpu_port) -{ - u8 data8; - u16 data16; - - /* enable tag tail for host port */ - if (cpu_port) - ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_TAIL_TAG_ENABLE, - true); - - ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, false); - - /* set back pressure */ - 
ksz_port_cfg(dev, port, REG_PORT_MAC_CTRL_1, PORT_BACK_PRESSURE, true); - - /* set flow control */ - ksz_port_cfg(dev, port, REG_PORT_CTRL_0, - PORT_FORCE_TX_FLOW_CTRL | PORT_FORCE_RX_FLOW_CTRL, true); - - /* enable broadcast storm limit */ - ksz_port_cfg(dev, port, P_BCAST_STORM_CTRL, PORT_BROADCAST_STORM, true); - - /* disable DiffServ priority */ - ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_DIFFSERV_PRIO_ENABLE, false); - - /* replace priority */ - ksz_port_cfg(dev, port, REG_PORT_MRI_MAC_CTRL, PORT_USER_PRIO_CEILING, - false); - ksz_port_cfg32(dev, port, REG_PORT_MTI_QUEUE_CTRL_0__4, - MTI_PVID_REPLACE, false); - - /* enable 802.1p priority */ - ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_PRIO_ENABLE, true); - - /* configure MAC to 1G & RGMII mode */ - ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8); - data8 |= PORT_RGMII_ID_EG_ENABLE; - data8 &= ~PORT_MII_NOT_1GBIT; - data8 &= ~PORT_MII_SEL_M; - data8 |= PORT_RGMII_SEL; - ksz_pwrite8(dev, port, REG_PORT_XMII_CTRL_1, data8); - - /* clear pending interrupts */ - ksz_pread16(dev, port, REG_PORT_PHY_INT_ENABLE, &data16); -} - -static void ksz_config_cpu_port(struct dsa_switch *ds) -{ - struct ksz_device *dev = ds->priv; + struct ksz_port *p; int i; - ds->num_ports = dev->port_cnt; - - for (i = 0; i < ds->num_ports; i++) { - if (dsa_is_cpu_port(ds, i) && (dev->cpu_ports & (1 << i))) { - dev->cpu_port = i; - - /* enable cpu port */ - port_setup(dev, i, true); - } - } -} - -static int ksz_setup(struct dsa_switch *ds) -{ - struct ksz_device *dev = ds->priv; - int ret = 0; - - dev->vlan_cache = devm_kcalloc(dev->dev, sizeof(struct vlan_table), - dev->num_vlans, GFP_KERNEL); - if (!dev->vlan_cache) - return -ENOMEM; - - ret = ksz_reset_switch(ds); - if (ret) { - dev_err(ds->dev, "failed to reset switch\n"); - return ret; + for (i = 0; i < dev->port_cnt; i++) { + if (i == port || i == dev->cpu_port) + continue; + p = &dev->ports[i]; + if (!(dev->member & (1 << i))) + continue; + + /* Port is a member of the bridge and is forwarding. 
*/ + if (p->stp_state == BR_STATE_FORWARDING && + p->member != dev->member) + dev->dev_ops->cfg_port_member(dev, i, dev->member); } - - /* accept packet up to 2000bytes */ - ksz_cfg(dev, REG_SW_MAC_CTRL_1, SW_LEGAL_PACKET_DISABLE, true); - - ksz_config_cpu_port(ds); - - ksz_cfg(dev, REG_SW_MAC_CTRL_1, MULTICAST_STORM_DISABLE, true); - - /* queue based egress rate limit */ - ksz_cfg(dev, REG_SW_MAC_CTRL_5, SW_OUT_RATE_LIMIT_QUEUE_BASED, true); - - /* start switch */ - ksz_cfg(dev, REG_SW_OPERATION, SW_START, true); - - return 0; } +EXPORT_SYMBOL_GPL(ksz_update_port_member); -static enum dsa_tag_protocol ksz_get_tag_protocol(struct dsa_switch *ds, - int port) -{ - return DSA_TAG_PROTO_KSZ; -} - -static int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg) +int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg) { struct ksz_device *dev = ds->priv; - u16 val = 0; + u16 val = 0xffff; - ksz_pread16(dev, addr, 0x100 + (reg << 1), &val); + dev->dev_ops->r_phy(dev, addr, reg, &val); return val; } +EXPORT_SYMBOL_GPL(ksz_phy_read16); -static int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) +int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val) { struct ksz_device *dev = ds->priv; - ksz_pwrite16(dev, addr, 0x100 + (reg << 1), val); + dev->dev_ops->w_phy(dev, addr, reg, val); return 0; } +EXPORT_SYMBOL_GPL(ksz_phy_write16); -static int ksz_enable_port(struct dsa_switch *ds, int port, - struct phy_device *phy) +int ksz_sset_count(struct dsa_switch *ds, int port, int sset) { struct ksz_device *dev = ds->priv; - /* setup slave port */ - port_setup(dev, port, false); - - return 0; -} - -static void ksz_disable_port(struct dsa_switch *ds, int port, - struct phy_device *phy) -{ - struct ksz_device *dev = ds->priv; - - /* there is no port disable */ - ksz_port_cfg(dev, port, REG_PORT_CTRL_0, PORT_MAC_LOOPBACK, true); -} - -static int ksz_sset_count(struct dsa_switch *ds, int port, int sset) -{ if (sset != ETH_SS_STATS) return 0; - return TOTAL_SWITCH_COUNTER_NUM; + return dev->mib_cnt; } +EXPORT_SYMBOL_GPL(ksz_sset_count); -static void ksz_get_strings(struct dsa_switch *ds, int port, - u32 stringset, uint8_t *buf) -{ - int i; - - if (stringset != ETH_SS_STATS) - return; - - for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { - memcpy(buf + i * ETH_GSTRING_LEN, mib_names[i].string, - ETH_GSTRING_LEN); - } -} - -static void ksz_get_ethtool_stats(struct dsa_switch *ds, int port, - uint64_t *buf) +int ksz_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *br) { struct ksz_device *dev = ds->priv; - int i; - u32 data; - int timeout; - - mutex_lock(&dev->stats_mutex); - - for (i = 0; i < TOTAL_SWITCH_COUNTER_NUM; i++) { - data = MIB_COUNTER_READ; - data |= ((mib_names[i].index & 0xFF) << MIB_COUNTER_INDEX_S); - ksz_pwrite32(dev, port, REG_PORT_MIB_CTRL_STAT__4, data); - - timeout = 1000; - do { - ksz_pread32(dev, port, REG_PORT_MIB_CTRL_STAT__4, - &data); - usleep_range(1, 10); - if (!(data & MIB_COUNTER_READ)) - break; - } while (timeout-- > 0); - - /* failed to read MIB. 
get out of loop */ - if (!timeout) { - dev_dbg(dev->dev, "Failed to get MIB\n"); - break; - } - /* count resets upon read */ - ksz_pread32(dev, port, REG_PORT_MIB_DATA, &data); + dev->br_member |= (1 << port); - dev->mib_value[i] += (uint64_t)data; - buf[i] = dev->mib_value[i]; - } - - mutex_unlock(&dev->stats_mutex); -} - -static void ksz_port_stp_state_set(struct dsa_switch *ds, int port, u8 state) -{ - struct ksz_device *dev = ds->priv; - u8 data; - - ksz_pread8(dev, port, P_STP_CTRL, &data); - data &= ~(PORT_TX_ENABLE | PORT_RX_ENABLE | PORT_LEARN_DISABLE); - - switch (state) { - case BR_STATE_DISABLED: - data |= PORT_LEARN_DISABLE; - break; - case BR_STATE_LISTENING: - data |= (PORT_RX_ENABLE | PORT_LEARN_DISABLE); - break; - case BR_STATE_LEARNING: - data |= PORT_RX_ENABLE; - break; - case BR_STATE_FORWARDING: - data |= (PORT_TX_ENABLE | PORT_RX_ENABLE); - break; - case BR_STATE_BLOCKING: - data |= PORT_LEARN_DISABLE; - break; - default: - dev_err(ds->dev, "invalid STP state: %d\n", state); - return; - } + /* port_stp_state_set() will be called after to put the port in + * appropriate state so there is no need to do anything. + */ - ksz_pwrite8(dev, port, P_STP_CTRL, data); + return 0; } +EXPORT_SYMBOL_GPL(ksz_port_bridge_join); -static void ksz_port_fast_age(struct dsa_switch *ds, int port) +void ksz_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br) { struct ksz_device *dev = ds->priv; - u8 data8; - ksz_read8(dev, REG_SW_LUE_CTRL_1, &data8); - data8 |= SW_FAST_AGING; - ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + dev->br_member &= ~(1 << port); + dev->member &= ~(1 << port); - data8 &= ~SW_FAST_AGING; - ksz_write8(dev, REG_SW_LUE_CTRL_1, data8); + /* port_stp_state_set() will be called after to put the port in + * forwarding state so there is no need to do anything. 
+ */ } +EXPORT_SYMBOL_GPL(ksz_port_bridge_leave); -static int ksz_port_vlan_filtering(struct dsa_switch *ds, int port, bool flag) +void ksz_port_fast_age(struct dsa_switch *ds, int port) { struct ksz_device *dev = ds->priv; - if (flag) { - ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, - PORT_VLAN_LOOKUP_VID_0, true); - ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, true); - ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, true); - } else { - ksz_cfg(dev, REG_SW_LUE_CTRL_0, SW_VLAN_ENABLE, false); - ksz_cfg32(dev, REG_SW_QM_CTRL__4, UNICAST_VLAN_BOUNDARY, false); - ksz_port_cfg(dev, port, REG_PORT_LUE_CTRL, - PORT_VLAN_LOOKUP_VID_0, false); - } - - return 0; + dev->dev_ops->flush_dyn_mac_table(dev, port); } +EXPORT_SYMBOL_GPL(ksz_port_fast_age); -static int ksz_port_vlan_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) +int ksz_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan) { /* nothing needed */ return 0; } +EXPORT_SYMBOL_GPL(ksz_port_vlan_prepare); -static void ksz_port_vlan_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) -{ - struct ksz_device *dev = ds->priv; - u32 vlan_table[3]; - u16 vid; - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - if (get_vlan_table(ds, vid, vlan_table)) { - dev_dbg(dev->dev, "Failed to get vlan table\n"); - return; - } - - vlan_table[0] = VLAN_VALID | (vid & VLAN_FID_M); - if (untagged) - vlan_table[1] |= BIT(port); - else - vlan_table[1] &= ~BIT(port); - vlan_table[1] &= ~(BIT(dev->cpu_port)); - - vlan_table[2] |= BIT(port) | BIT(dev->cpu_port); - - if (set_vlan_table(ds, vid, vlan_table)) { - dev_dbg(dev->dev, "Failed to set vlan table\n"); - return; - } - - /* change PVID */ - if (vlan->flags & BRIDGE_VLAN_INFO_PVID) - ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, vid); - } -} - -static int ksz_port_vlan_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_vlan *vlan) -{ - struct ksz_device *dev = ds->priv; - bool untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; - u32 vlan_table[3]; - u16 vid; - u16 pvid; - - ksz_pread16(dev, port, REG_PORT_DEFAULT_VID, &pvid); - pvid = pvid & 0xFFF; - - for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { - if (get_vlan_table(ds, vid, vlan_table)) { - dev_dbg(dev->dev, "Failed to get vlan table\n"); - return -ETIMEDOUT; - } - - vlan_table[2] &= ~BIT(port); - - if (pvid == vid) - pvid = 1; - - if (untagged) - vlan_table[1] &= ~BIT(port); - - if (set_vlan_table(ds, vid, vlan_table)) { - dev_dbg(dev->dev, "Failed to set vlan table\n"); - return -ETIMEDOUT; - } - } - - ksz_pwrite16(dev, port, REG_PORT_DEFAULT_VID, pvid); - - return 0; -} - -struct alu_struct { - /* entry 1 */ - u8 is_static:1; - u8 is_src_filter:1; - u8 is_dst_filter:1; - u8 prio_age:3; - u32 _reserv_0_1:23; - u8 mstp:3; - /* entry 2 */ - u8 is_override:1; - u8 is_use_fid:1; - u32 _reserv_1_1:23; - u8 port_forward:7; - /* entry 3 & 4*/ - u32 _reserv_2_1:9; - u8 fid:7; - u8 mac[ETH_ALEN]; -}; - -static int ksz_port_fdb_add(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) -{ - struct ksz_device *dev = ds->priv; - u32 alu_table[4]; - u32 data; - int ret = 0; - - mutex_lock(&dev->alu_mutex); - - /* find any entry with mac & vid */ - data = vid << ALU_FID_INDEX_S; - data |= ((addr[0] << 8) | addr[1]); - ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - - data = ((addr[2] << 24) | (addr[3] << 16)); - data |= ((addr[4] << 
8) | addr[5]); - ksz_write32(dev, REG_SW_ALU_INDEX_1, data); - - /* start read operation */ - ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); - - /* wait to be finished */ - ret = wait_alu_ready(dev, ALU_START, 1000); - if (ret < 0) { - dev_dbg(dev->dev, "Failed to read ALU\n"); - goto exit; - } - - /* read ALU entry */ - read_table(ds, alu_table); - - /* update ALU entry */ - alu_table[0] = ALU_V_STATIC_VALID; - alu_table[1] |= BIT(port); - if (vid) - alu_table[1] |= ALU_V_USE_FID; - alu_table[2] = (vid << ALU_V_FID_S); - alu_table[2] |= ((addr[0] << 8) | addr[1]); - alu_table[3] = ((addr[2] << 24) | (addr[3] << 16)); - alu_table[3] |= ((addr[4] << 8) | addr[5]); - - write_table(ds, alu_table); - - ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); - - /* wait to be finished */ - ret = wait_alu_ready(dev, ALU_START, 1000); - if (ret < 0) - dev_dbg(dev->dev, "Failed to write ALU\n"); - -exit: - mutex_unlock(&dev->alu_mutex); - - return ret; -} - -static int ksz_port_fdb_del(struct dsa_switch *ds, int port, - const unsigned char *addr, u16 vid) +int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, + void *data) { struct ksz_device *dev = ds->priv; - u32 alu_table[4]; - u32 data; int ret = 0; - - mutex_lock(&dev->alu_mutex); - - /* read any entry with mac & vid */ - data = vid << ALU_FID_INDEX_S; - data |= ((addr[0] << 8) | addr[1]); - ksz_write32(dev, REG_SW_ALU_INDEX_0, data); - - data = ((addr[2] << 24) | (addr[3] << 16)); - data |= ((addr[4] << 8) | addr[5]); - ksz_write32(dev, REG_SW_ALU_INDEX_1, data); - - /* start read operation */ - ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_READ | ALU_START); - - /* wait to be finished */ - ret = wait_alu_ready(dev, ALU_START, 1000); - if (ret < 0) { - dev_dbg(dev->dev, "Failed to read ALU\n"); - goto exit; - } - - ksz_read32(dev, REG_SW_ALU_VAL_A, &alu_table[0]); - if (alu_table[0] & ALU_V_STATIC_VALID) { - ksz_read32(dev, REG_SW_ALU_VAL_B, &alu_table[1]); - ksz_read32(dev, REG_SW_ALU_VAL_C, &alu_table[2]); - ksz_read32(dev, REG_SW_ALU_VAL_D, &alu_table[3]); - - /* clear forwarding port */ - alu_table[2] &= ~BIT(port); - - /* if there is no port to forward, clear table */ - if ((alu_table[2] & ALU_V_PORT_MAP) == 0) { - alu_table[0] = 0; - alu_table[1] = 0; - alu_table[2] = 0; - alu_table[3] = 0; - } - } else { - alu_table[0] = 0; - alu_table[1] = 0; - alu_table[2] = 0; - alu_table[3] = 0; - } - - write_table(ds, alu_table); - - ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_WRITE | ALU_START); - - /* wait to be finished */ - ret = wait_alu_ready(dev, ALU_START, 1000); - if (ret < 0) - dev_dbg(dev->dev, "Failed to write ALU\n"); - -exit: - mutex_unlock(&dev->alu_mutex); - - return ret; -} - -static void convert_alu(struct alu_struct *alu, u32 *alu_table) -{ - alu->is_static = !!(alu_table[0] & ALU_V_STATIC_VALID); - alu->is_src_filter = !!(alu_table[0] & ALU_V_SRC_FILTER); - alu->is_dst_filter = !!(alu_table[0] & ALU_V_DST_FILTER); - alu->prio_age = (alu_table[0] >> ALU_V_PRIO_AGE_CNT_S) & - ALU_V_PRIO_AGE_CNT_M; - alu->mstp = alu_table[0] & ALU_V_MSTP_M; - - alu->is_override = !!(alu_table[1] & ALU_V_OVERRIDE); - alu->is_use_fid = !!(alu_table[1] & ALU_V_USE_FID); - alu->port_forward = alu_table[1] & ALU_V_PORT_MAP; - - alu->fid = (alu_table[2] >> ALU_V_FID_S) & ALU_V_FID_M; - - alu->mac[0] = (alu_table[2] >> 8) & 0xFF; - alu->mac[1] = alu_table[2] & 0xFF; - alu->mac[2] = (alu_table[3] >> 24) & 0xFF; - alu->mac[3] = (alu_table[3] >> 16) & 0xFF; - alu->mac[4] = (alu_table[3] >> 8) & 0xFF; - alu->mac[5] = 
alu_table[3] & 0xFF; -} - -static int ksz_port_fdb_dump(struct dsa_switch *ds, int port, - dsa_fdb_dump_cb_t *cb, void *data) -{ - struct ksz_device *dev = ds->priv; - int ret = 0; - u32 ksz_data; - u32 alu_table[4]; + u16 i = 0; + u16 entries = 0; + u8 timestamp = 0; + u8 fid; + u8 member; struct alu_struct alu; - int timeout; - - mutex_lock(&dev->alu_mutex); - - /* start ALU search */ - ksz_write32(dev, REG_SW_ALU_CTRL__4, ALU_START | ALU_SEARCH); do { - timeout = 1000; - do { - ksz_read32(dev, REG_SW_ALU_CTRL__4, &ksz_data); - if ((ksz_data & ALU_VALID) || !(ksz_data & ALU_START)) - break; - usleep_range(1, 10); - } while (timeout-- > 0); - - if (!timeout) { - dev_dbg(dev->dev, "Failed to search ALU\n"); - ret = -ETIMEDOUT; - goto exit; - } - - /* read ALU table */ - read_table(ds, alu_table); - - convert_alu(&alu, alu_table); - - if (alu.port_forward & BIT(port)) { + alu.is_static = false; + ret = dev->dev_ops->r_dyn_mac_table(dev, i, alu.mac, &fid, + &member, ×tamp, + &entries); + if (!ret && (member & BIT(port))) { ret = cb(alu.mac, alu.fid, alu.is_static, data); if (ret) - goto exit; + break; } - } while (ksz_data & ALU_START); - -exit: - - /* stop ALU search */ - ksz_write32(dev, REG_SW_ALU_CTRL__4, 0); - - mutex_unlock(&dev->alu_mutex); + i++; + } while (i < entries); + if (i >= entries) + ret = 0; return ret; } +EXPORT_SYMBOL_GPL(ksz_port_fdb_dump); -static int ksz_port_mdb_prepare(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) +int ksz_port_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) { /* nothing to do */ return 0; } +EXPORT_SYMBOL_GPL(ksz_port_mdb_prepare); -static void ksz_port_mdb_add(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) +void ksz_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) { struct ksz_device *dev = ds->priv; - u32 static_table[4]; - u32 data; + struct alu_struct alu; int index; - u32 mac_hi, mac_lo; - - mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]); - mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16)); - mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]); - - mutex_lock(&dev->alu_mutex); + int empty = 0; + alu.port_forward = 0; for (index = 0; index < dev->num_statics; index++) { - /* find empty slot first */ - data = (index << ALU_STAT_INDEX_S) | - ALU_STAT_READ | ALU_STAT_START; - ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); - - /* wait to be finished */ - if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) { - dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); - goto exit; - } - - /* read ALU static table */ - read_table(ds, static_table); - - if (static_table[0] & ALU_V_STATIC_VALID) { - /* check this has same vid & mac address */ - if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) && - ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) && - (static_table[3] == mac_lo)) { - /* found matching one */ + if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) { + /* Found one already in static MAC table. */ + if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) && + alu.fid == mdb->vid) break; - } - } else { - /* found empty one */ - break; + /* Remember the first empty entry. 
*/ + } else if (!empty) { + empty = index + 1; } } /* no available entry */ - if (index == dev->num_statics) - goto exit; + if (index == dev->num_statics && !empty) + return; /* add entry */ - static_table[0] = ALU_V_STATIC_VALID; - static_table[1] |= BIT(port); - if (mdb->vid) - static_table[1] |= ALU_V_USE_FID; - static_table[2] = (mdb->vid << ALU_V_FID_S); - static_table[2] |= mac_hi; - static_table[3] = mac_lo; - - write_table(ds, static_table); - - data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START; - ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); - - /* wait to be finished */ - if (wait_alu_sta_ready(dev, ALU_STAT_START, 1000) < 0) - dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + if (index == dev->num_statics) { + index = empty - 1; + memset(&alu, 0, sizeof(alu)); + memcpy(alu.mac, mdb->addr, ETH_ALEN); + alu.is_static = true; + } + alu.port_forward |= BIT(port); + if (mdb->vid) { + alu.is_use_fid = true; -exit: - mutex_unlock(&dev->alu_mutex); + /* Need a way to map VID to FID. */ + alu.fid = mdb->vid; + } + dev->dev_ops->w_sta_mac_table(dev, index, &alu); } +EXPORT_SYMBOL_GPL(ksz_port_mdb_add); -static int ksz_port_mdb_del(struct dsa_switch *ds, int port, - const struct switchdev_obj_port_mdb *mdb) +int ksz_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb) { struct ksz_device *dev = ds->priv; - u32 static_table[4]; - u32 data; + struct alu_struct alu; int index; int ret = 0; - u32 mac_hi, mac_lo; - - mac_hi = ((mdb->addr[0] << 8) | mdb->addr[1]); - mac_lo = ((mdb->addr[2] << 24) | (mdb->addr[3] << 16)); - mac_lo |= ((mdb->addr[4] << 8) | mdb->addr[5]); - - mutex_lock(&dev->alu_mutex); for (index = 0; index < dev->num_statics; index++) { - /* find empty slot first */ - data = (index << ALU_STAT_INDEX_S) | - ALU_STAT_READ | ALU_STAT_START; - ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); - - /* wait to be finished */ - ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000); - if (ret < 0) { - dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); - goto exit; - } - - /* read ALU static table */ - read_table(ds, static_table); - - if (static_table[0] & ALU_V_STATIC_VALID) { - /* check this has same vid & mac address */ - - if (((static_table[2] >> ALU_V_FID_S) == (mdb->vid)) && - ((static_table[2] & ALU_V_MAC_ADDR_HI) == mac_hi) && - (static_table[3] == mac_lo)) { - /* found matching one */ + if (!dev->dev_ops->r_sta_mac_table(dev, index, &alu)) { + /* Found one already in static MAC table. 
*/ + if (!memcmp(alu.mac, mdb->addr, ETH_ALEN) && + alu.fid == mdb->vid) break; - } } } /* no available entry */ - if (index == dev->num_statics) { - ret = -EINVAL; + if (index == dev->num_statics) goto exit; - } /* clear port */ - static_table[1] &= ~BIT(port); - - if ((static_table[1] & ALU_V_PORT_MAP) == 0) { - /* delete entry */ - static_table[0] = 0; - static_table[1] = 0; - static_table[2] = 0; - static_table[3] = 0; - } - - write_table(ds, static_table); - - data = (index << ALU_STAT_INDEX_S) | ALU_STAT_START; - ksz_write32(dev, REG_SW_ALU_STAT_CTRL__4, data); - - /* wait to be finished */ - ret = wait_alu_sta_ready(dev, ALU_STAT_START, 1000); - if (ret < 0) - dev_dbg(dev->dev, "Failed to read ALU STATIC\n"); + alu.port_forward &= ~BIT(port); + if (!alu.port_forward) + alu.is_static = false; + dev->dev_ops->w_sta_mac_table(dev, index, &alu); exit: - mutex_unlock(&dev->alu_mutex); - return ret; } +EXPORT_SYMBOL_GPL(ksz_port_mdb_del); -static int ksz_port_mirror_add(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror, - bool ingress) +int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct ksz_device *dev = ds->priv; - if (ingress) - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, true); - else - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, true); - - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_SNIFFER, false); - - /* configure mirror port */ - ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, - PORT_MIRROR_SNIFFER, true); + /* setup slave port */ + dev->dev_ops->port_setup(dev, port, false); - ksz_cfg(dev, S_MIRROR_CTRL, SW_MIRROR_RX_TX, false); + /* port_stp_state_set() will be called afterwards to enable the port, + * so there is no need to do anything. + */ return 0; } +EXPORT_SYMBOL_GPL(ksz_enable_port); -static void ksz_port_mirror_del(struct dsa_switch *ds, int port, - struct dsa_mall_mirror_tc_entry *mirror) +void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy) { struct ksz_device *dev = ds->priv; - u8 data; - - if (mirror->ingress) - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_RX, false); - else - ksz_port_cfg(dev, port, P_MIRROR_CTRL, PORT_MIRROR_TX, false); - ksz_pread8(dev, port, P_MIRROR_CTRL, &data); + dev->on_ports &= ~(1 << port); + dev->live_ports &= ~(1 << port); - if (!(data & (PORT_MIRROR_RX | PORT_MIRROR_TX))) - ksz_port_cfg(dev, mirror->to_local_port, P_MIRROR_CTRL, - PORT_MIRROR_SNIFFER, false); -} - -static const struct dsa_switch_ops ksz_switch_ops = { - .get_tag_protocol = ksz_get_tag_protocol, - .setup = ksz_setup, - .phy_read = ksz_phy_read16, - .phy_write = ksz_phy_write16, - .port_enable = ksz_enable_port, - .port_disable = ksz_disable_port, - .get_strings = ksz_get_strings, - .get_ethtool_stats = ksz_get_ethtool_stats, - .get_sset_count = ksz_sset_count, - .port_stp_state_set = ksz_port_stp_state_set, - .port_fast_age = ksz_port_fast_age, - .port_vlan_filtering = ksz_port_vlan_filtering, - .port_vlan_prepare = ksz_port_vlan_prepare, - .port_vlan_add = ksz_port_vlan_add, - .port_vlan_del = ksz_port_vlan_del, - .port_fdb_dump = ksz_port_fdb_dump, - .port_fdb_add = ksz_port_fdb_add, - .port_fdb_del = ksz_port_fdb_del, - .port_mdb_prepare = ksz_port_mdb_prepare, - .port_mdb_add = ksz_port_mdb_add, - .port_mdb_del = ksz_port_mdb_del, - .port_mirror_add = ksz_port_mirror_add, - .port_mirror_del = ksz_port_mirror_del, -}; - -struct ksz_chip_data { - u32 chip_id; - const char *dev_name; - int num_vlans; - int num_alus; - int num_statics; - int cpu_ports;
- int port_cnt; -}; - -static const struct ksz_chip_data ksz_switch_chips[] = { - { - .chip_id = 0x00947700, - .dev_name = "KSZ9477", - .num_vlans = 4096, - .num_alus = 4096, - .num_statics = 16, - .cpu_ports = 0x7F, /* can be configured as cpu port */ - .port_cnt = 7, /* total physical port count */ - }, - { - .chip_id = 0x00989700, - .dev_name = "KSZ9897", - .num_vlans = 4096, - .num_alus = 4096, - .num_statics = 16, - .cpu_ports = 0x7F, /* can be configured as cpu port */ - .port_cnt = 7, /* total physical port count */ - }, -}; - -static int ksz_switch_init(struct ksz_device *dev) -{ - int i; - - dev->ds->ops = &ksz_switch_ops; - - for (i = 0; i < ARRAY_SIZE(ksz_switch_chips); i++) { - const struct ksz_chip_data *chip = &ksz_switch_chips[i]; - - if (dev->chip_id == chip->chip_id) { - dev->name = chip->dev_name; - dev->num_vlans = chip->num_vlans; - dev->num_alus = chip->num_alus; - dev->num_statics = chip->num_statics; - dev->port_cnt = chip->port_cnt; - dev->cpu_ports = chip->cpu_ports; - - break; - } - } - - /* no switch found */ - if (!dev->port_cnt) - return -ENODEV; - - return 0; + /* port_stp_state_set() will be called afterwards to disable the port, + * so there is no need to do anything. + */ } +EXPORT_SYMBOL_GPL(ksz_disable_port); struct ksz_device *ksz_switch_alloc(struct device *base, const struct ksz_io_ops *ops, @@ -1167,59 +288,64 @@ struct ksz_device *ksz_switch_alloc(struct device *base, } EXPORT_SYMBOL(ksz_switch_alloc); -int ksz_switch_detect(struct ksz_device *dev) -{ - u8 data8; - u32 id32; - int ret; - - /* turn off SPI DO Edge select */ - ret = ksz_read8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, &data8); - if (ret) - return ret; - - data8 &= ~SPI_AUTO_EDGE_DETECTION; - ret = ksz_write8(dev, REG_SW_GLOBAL_SERIAL_CTRL_0, data8); - if (ret) - return ret; - - /* read chip id */ - ret = ksz_read32(dev, REG_CHIP_ID0__1, &id32); - if (ret) - return ret; - - dev->chip_id = id32; - - return 0; -} -EXPORT_SYMBOL(ksz_switch_detect); - -int ksz_switch_register(struct ksz_device *dev) +int ksz_switch_register(struct ksz_device *dev, + const struct ksz_dev_ops *ops) { int ret; if (dev->pdata) dev->chip_id = dev->pdata->chip_id; + dev->reset_gpio = devm_gpiod_get_optional(dev->dev, "reset", + GPIOD_OUT_LOW); + if (IS_ERR(dev->reset_gpio)) + return PTR_ERR(dev->reset_gpio); + + if (dev->reset_gpio) { + gpiod_set_value(dev->reset_gpio, 1); + mdelay(10); + gpiod_set_value(dev->reset_gpio, 0); + } + mutex_init(&dev->reg_mutex); mutex_init(&dev->stats_mutex); mutex_init(&dev->alu_mutex); mutex_init(&dev->vlan_mutex); - if (ksz_switch_detect(dev)) + dev->dev_ops = ops; + + if (dev->dev_ops->detect(dev)) return -EINVAL; - ret = ksz_switch_init(dev); + ret = dev->dev_ops->init(dev); if (ret) return ret; - return dsa_register_switch(dev->ds); + dev->interface = PHY_INTERFACE_MODE_MII; + if (dev->dev->of_node) { + ret = of_get_phy_mode(dev->dev->of_node); + if (ret >= 0) + dev->interface = ret; + } + + ret = dsa_register_switch(dev->ds); + if (ret) { + dev->dev_ops->exit(dev); + return ret; + } + + return 0; } EXPORT_SYMBOL(ksz_switch_register); void ksz_switch_remove(struct ksz_device *dev) { + dev->dev_ops->exit(dev); dsa_unregister_switch(dev->ds); + + if (dev->reset_gpio) + gpiod_set_value(dev->reset_gpio, 1); + } EXPORT_SYMBOL(ksz_switch_remove); diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h new file mode 100644 index 000000000000..2dd832de0d52 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -0,0 +1,214 @@ +/* SPDX-License-Identifier:
GPL-2.0 + * Microchip switch driver common header + * + * Copyright (C) 2017-2018 Microchip Technology Inc. + */ + +#ifndef __KSZ_COMMON_H +#define __KSZ_COMMON_H + +void ksz_update_port_member(struct ksz_device *dev, int port); + +/* Common DSA access functions */ + +int ksz_phy_read16(struct dsa_switch *ds, int addr, int reg); +int ksz_phy_write16(struct dsa_switch *ds, int addr, int reg, u16 val); +int ksz_sset_count(struct dsa_switch *ds, int port, int sset); +int ksz_port_bridge_join(struct dsa_switch *ds, int port, + struct net_device *br); +void ksz_port_bridge_leave(struct dsa_switch *ds, int port, + struct net_device *br); +void ksz_port_fast_age(struct dsa_switch *ds, int port); +int ksz_port_vlan_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_vlan *vlan); +int ksz_port_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, + void *data); +int ksz_port_mdb_prepare(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); +void ksz_port_mdb_add(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); +int ksz_port_mdb_del(struct dsa_switch *ds, int port, + const struct switchdev_obj_port_mdb *mdb); +int ksz_enable_port(struct dsa_switch *ds, int port, struct phy_device *phy); +void ksz_disable_port(struct dsa_switch *ds, int port, struct phy_device *phy); + +/* Common register access functions */ + +static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read8(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read16(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read24(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->read32(dev, reg, val); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write8(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write16(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write24(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->write32(dev, reg, value); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_get(struct ksz_device *dev, u32 reg, void *data, + size_t len) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->get(dev, reg, data, len); + mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline int ksz_set(struct ksz_device *dev, u32 reg, void *data, + size_t len) +{ + int ret; + + mutex_lock(&dev->reg_mutex); + ret = dev->ops->set(dev, reg, data, len); + 
mutex_unlock(&dev->reg_mutex); + + return ret; +} + +static inline void ksz_pread8(struct ksz_device *dev, int port, int offset, + u8 *data) +{ + ksz_read8(dev, dev->dev_ops->get_port_addr(port, offset), data); +} + +static inline void ksz_pread16(struct ksz_device *dev, int port, int offset, + u16 *data) +{ + ksz_read16(dev, dev->dev_ops->get_port_addr(port, offset), data); +} + +static inline void ksz_pread32(struct ksz_device *dev, int port, int offset, + u32 *data) +{ + ksz_read32(dev, dev->dev_ops->get_port_addr(port, offset), data); +} + +static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset, + u8 data) +{ + ksz_write8(dev, dev->dev_ops->get_port_addr(port, offset), data); +} + +static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset, + u16 data) +{ + ksz_write16(dev, dev->dev_ops->get_port_addr(port, offset), data); +} + +static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, + u32 data) +{ + ksz_write32(dev, dev->dev_ops->get_port_addr(port, offset), data); +} + +static void ksz_cfg(struct ksz_device *dev, u32 addr, u8 bits, bool set) +{ + u8 data; + + ksz_read8(dev, addr, &data); + if (set) + data |= bits; + else + data &= ~bits; + ksz_write8(dev, addr, data); +} + +static void ksz_port_cfg(struct ksz_device *dev, int port, int offset, u8 bits, + bool set) +{ + u32 addr; + u8 data; + + addr = dev->dev_ops->get_port_addr(port, offset); + ksz_read8(dev, addr, &data); + + if (set) + data |= bits; + else + data &= ~bits; + + ksz_write8(dev, addr, data); +} + +#endif diff --git a/drivers/net/dsa/microchip/ksz_priv.h b/drivers/net/dsa/microchip/ksz_priv.h index 2a98dbd51456..60b49010904b 100644 --- a/drivers/net/dsa/microchip/ksz_priv.h +++ b/drivers/net/dsa/microchip/ksz_priv.h @@ -1,19 +1,8 @@ -/* - * Microchip KSZ series switch common definitions - * - * Copyright (C) 2017 +/* SPDX-License-Identifier: GPL-2.0 * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. + * Microchip KSZ series switch common definitions * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + * Copyright (C) 2017-2018 Microchip Technology Inc. 
*/ #ifndef __KSZ_PRIV_H @@ -25,7 +14,7 @@ #include <linux/etherdevice.h> #include <net/dsa.h> -#include "ksz_9477_reg.h" +#include "ksz9477_reg.h" struct ksz_io_ops; @@ -33,6 +22,27 @@ struct vlan_table { u32 table[3]; }; +struct ksz_port_mib { + u8 cnt_ptr; + u64 *counters; +}; + +struct ksz_port { + u16 member; + u16 vid_member; + int stp_state; + struct phy_device phydev; + + u32 on:1; /* port is not disabled by hardware */ + u32 phy:1; /* port has a PHY */ + u32 fiber:1; /* port is fiber */ + u32 sgmii:1; /* port is SGMII */ + u32 force:1; + u32 link_just_down:1; /* link just goes down */ + + struct ksz_port_mib mib; +}; + struct ksz_device { struct dsa_switch *ds; struct ksz_platform_data *pdata; @@ -43,11 +53,14 @@ struct ksz_device { struct mutex alu_mutex; /* ALU access */ struct mutex vlan_mutex; /* vlan access */ const struct ksz_io_ops *ops; + const struct ksz_dev_ops *dev_ops; struct device *dev; void *priv; + struct gpio_desc *reset_gpio; /* Optional reset GPIO */ + /* chip specific data */ u32 chip_id; int num_vlans; @@ -55,11 +68,37 @@ struct ksz_device { int num_statics; int cpu_port; /* port connected to CPU */ int cpu_ports; /* port bitmap can be cpu port */ + int phy_port_cnt; int port_cnt; + int reg_mib_cnt; + int mib_cnt; + int mib_port_cnt; + int last_port; /* ports after that not used */ + phy_interface_t interface; + u32 regs_size; struct vlan_table *vlan_cache; u64 mib_value[TOTAL_SWITCH_COUNTER_NUM]; + + u8 *txbuf; + + struct ksz_port *ports; + struct timer_list mib_read_timer; + struct work_struct mib_read; + unsigned long mib_read_interval; + u16 br_member; + u16 member; + u16 live_ports; + u16 on_ports; /* ports enabled by DSA */ + u16 rx_ports; + u16 tx_ports; + u16 mirror_rx; + u16 mirror_tx; + u32 features; /* chip specific features */ + u32 overrides; /* chip functions set by user */ + u16 host_mask; + u16 port_mask; }; struct ksz_io_ops { @@ -71,140 +110,60 @@ struct ksz_io_ops { int (*write16)(struct ksz_device *dev, u32 reg, u16 value); int (*write24)(struct ksz_device *dev, u32 reg, u32 value); int (*write32)(struct ksz_device *dev, u32 reg, u32 value); - int (*phy_read16)(struct ksz_device *dev, int addr, int reg, - u16 *value); - int (*phy_write16)(struct ksz_device *dev, int addr, int reg, - u16 value); + int (*get)(struct ksz_device *dev, u32 reg, void *data, size_t len); + int (*set)(struct ksz_device *dev, u32 reg, void *data, size_t len); +}; + +struct alu_struct { + /* entry 1 */ + u8 is_static:1; + u8 is_src_filter:1; + u8 is_dst_filter:1; + u8 prio_age:3; + u32 _reserv_0_1:23; + u8 mstp:3; + /* entry 2 */ + u8 is_override:1; + u8 is_use_fid:1; + u32 _reserv_1_1:23; + u8 port_forward:7; + /* entry 3 & 4*/ + u32 _reserv_2_1:9; + u8 fid:7; + u8 mac[ETH_ALEN]; +}; + +struct ksz_dev_ops { + u32 (*get_port_addr)(int port, int offset); + void (*cfg_port_member)(struct ksz_device *dev, int port, u8 member); + void (*flush_dyn_mac_table)(struct ksz_device *dev, int port); + void (*port_setup)(struct ksz_device *dev, int port, bool cpu_port); + void (*r_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 *val); + void (*w_phy)(struct ksz_device *dev, u16 phy, u16 reg, u16 val); + int (*r_dyn_mac_table)(struct ksz_device *dev, u16 addr, u8 *mac_addr, + u8 *fid, u8 *src_port, u8 *timestamp, + u16 *entries); + int (*r_sta_mac_table)(struct ksz_device *dev, u16 addr, + struct alu_struct *alu); + void (*w_sta_mac_table)(struct ksz_device *dev, u16 addr, + struct alu_struct *alu); + void (*r_mib_cnt)(struct ksz_device *dev, int port, u16 addr, + u64 *cnt); + 
void (*r_mib_pkt)(struct ksz_device *dev, int port, u16 addr, + u64 *dropped, u64 *cnt); + void (*port_init_cnt)(struct ksz_device *dev, int port); + int (*shutdown)(struct ksz_device *dev); + int (*detect)(struct ksz_device *dev); + int (*init)(struct ksz_device *dev); + void (*exit)(struct ksz_device *dev); }; struct ksz_device *ksz_switch_alloc(struct device *base, const struct ksz_io_ops *ops, void *priv); -int ksz_switch_detect(struct ksz_device *dev); -int ksz_switch_register(struct ksz_device *dev); +int ksz_switch_register(struct ksz_device *dev, + const struct ksz_dev_ops *ops); void ksz_switch_remove(struct ksz_device *dev); -static inline int ksz_read8(struct ksz_device *dev, u32 reg, u8 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read8(dev, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_read16(struct ksz_device *dev, u32 reg, u16 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read16(dev, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_read24(struct ksz_device *dev, u32 reg, u32 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read24(dev, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_read32(struct ksz_device *dev, u32 reg, u32 *val) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->read32(dev, reg, val); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_write8(struct ksz_device *dev, u32 reg, u8 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write8(dev, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_write16(struct ksz_device *dev, u32 reg, u16 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write16(dev, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_write24(struct ksz_device *dev, u32 reg, u32 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write24(dev, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline int ksz_write32(struct ksz_device *dev, u32 reg, u32 value) -{ - int ret; - - mutex_lock(&dev->reg_mutex); - ret = dev->ops->write32(dev, reg, value); - mutex_unlock(&dev->reg_mutex); - - return ret; -} - -static inline void ksz_pread8(struct ksz_device *dev, int port, int offset, - u8 *data) -{ - ksz_read8(dev, PORT_CTRL_ADDR(port, offset), data); -} - -static inline void ksz_pread16(struct ksz_device *dev, int port, int offset, - u16 *data) -{ - ksz_read16(dev, PORT_CTRL_ADDR(port, offset), data); -} - -static inline void ksz_pread32(struct ksz_device *dev, int port, int offset, - u32 *data) -{ - ksz_read32(dev, PORT_CTRL_ADDR(port, offset), data); -} - -static inline void ksz_pwrite8(struct ksz_device *dev, int port, int offset, - u8 data) -{ - ksz_write8(dev, PORT_CTRL_ADDR(port, offset), data); -} - -static inline void ksz_pwrite16(struct ksz_device *dev, int port, int offset, - u16 data) -{ - ksz_write16(dev, PORT_CTRL_ADDR(port, offset), data); -} - -static inline void ksz_pwrite32(struct ksz_device *dev, int port, int offset, - u32 data) -{ - ksz_write32(dev, PORT_CTRL_ADDR(port, offset), data); -} +int ksz9477_switch_register(struct ksz_device *dev); #endif diff --git a/drivers/net/dsa/microchip/ksz_spi.c b/drivers/net/dsa/microchip/ksz_spi.c deleted file mode 100644 index 8c1778b42701..000000000000 --- a/drivers/net/dsa/microchip/ksz_spi.c +++ 
/dev/null @@ -1,217 +0,0 @@ -/* - * Microchip KSZ series register access through SPI - * - * Copyright (C) 2017 - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - */ - -#include <asm/unaligned.h> - -#include <linux/delay.h> -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/spi/spi.h> - -#include "ksz_priv.h" - -/* SPI frame opcodes */ -#define KS_SPIOP_RD 3 -#define KS_SPIOP_WR 2 - -#define SPI_ADDR_SHIFT 24 -#define SPI_ADDR_MASK (BIT(SPI_ADDR_SHIFT) - 1) -#define SPI_TURNAROUND_SHIFT 5 - -static int ksz_spi_read_reg(struct spi_device *spi, u32 reg, u8 *val, - unsigned int len) -{ - u32 txbuf; - int ret; - - txbuf = reg & SPI_ADDR_MASK; - txbuf |= KS_SPIOP_RD << SPI_ADDR_SHIFT; - txbuf <<= SPI_TURNAROUND_SHIFT; - txbuf = cpu_to_be32(txbuf); - - ret = spi_write_then_read(spi, &txbuf, 4, val, len); - return ret; -} - -static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data, - unsigned int len) -{ - struct spi_device *spi = dev->priv; - - return ksz_spi_read_reg(spi, reg, data, len); -} - -static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val) -{ - return ksz_spi_read(dev, reg, val, 1); -} - -static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val) -{ - int ret = ksz_spi_read(dev, reg, (u8 *)val, 2); - - if (!ret) - *val = be16_to_cpu(*val); - - return ret; -} - -static int ksz_spi_read24(struct ksz_device *dev, u32 reg, u32 *val) -{ - int ret; - - *val = 0; - ret = ksz_spi_read(dev, reg, (u8 *)val, 3); - if (!ret) { - *val = be32_to_cpu(*val); - /* convert to 24bit */ - *val >>= 8; - } - - return ret; -} - -static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val) -{ - int ret = ksz_spi_read(dev, reg, (u8 *)val, 4); - - if (!ret) - *val = be32_to_cpu(*val); - - return ret; -} - -static int ksz_spi_write_reg(struct spi_device *spi, u32 reg, u8 *val, - unsigned int len) -{ - u32 txbuf; - u8 data[12]; - int i; - - txbuf = reg & SPI_ADDR_MASK; - txbuf |= (KS_SPIOP_WR << SPI_ADDR_SHIFT); - txbuf <<= SPI_TURNAROUND_SHIFT; - txbuf = cpu_to_be32(txbuf); - - data[0] = txbuf & 0xFF; - data[1] = (txbuf & 0xFF00) >> 8; - data[2] = (txbuf & 0xFF0000) >> 16; - data[3] = (txbuf & 0xFF000000) >> 24; - for (i = 0; i < len; i++) - data[i + 4] = val[i]; - - return spi_write(spi, &data, 4 + len); -} - -static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value) -{ - struct spi_device *spi = dev->priv; - - return ksz_spi_write_reg(spi, reg, &value, 1); -} - -static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value) -{ - struct spi_device *spi = dev->priv; - - value = cpu_to_be16(value); - return ksz_spi_write_reg(spi, reg, (u8 *)&value, 2); -} - -static int ksz_spi_write24(struct ksz_device *dev, u32 reg, u32 value) -{ - struct spi_device *spi = dev->priv; - - /* make it to big endian 24bit from MSB */ - value <<= 8; - value = 
cpu_to_be32(value); - return ksz_spi_write_reg(spi, reg, (u8 *)&value, 3); -} - -static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value) -{ - struct spi_device *spi = dev->priv; - - value = cpu_to_be32(value); - return ksz_spi_write_reg(spi, reg, (u8 *)&value, 4); -} - -static const struct ksz_io_ops ksz_spi_ops = { - .read8 = ksz_spi_read8, - .read16 = ksz_spi_read16, - .read24 = ksz_spi_read24, - .read32 = ksz_spi_read32, - .write8 = ksz_spi_write8, - .write16 = ksz_spi_write16, - .write24 = ksz_spi_write24, - .write32 = ksz_spi_write32, -}; - -static int ksz_spi_probe(struct spi_device *spi) -{ - struct ksz_device *dev; - int ret; - - dev = ksz_switch_alloc(&spi->dev, &ksz_spi_ops, spi); - if (!dev) - return -ENOMEM; - - if (spi->dev.platform_data) - dev->pdata = spi->dev.platform_data; - - ret = ksz_switch_register(dev); - if (ret) - return ret; - - spi_set_drvdata(spi, dev); - - return 0; -} - -static int ksz_spi_remove(struct spi_device *spi) -{ - struct ksz_device *dev = spi_get_drvdata(spi); - - if (dev) - ksz_switch_remove(dev); - - return 0; -} - -static const struct of_device_id ksz_dt_ids[] = { - { .compatible = "microchip,ksz9477" }, - { .compatible = "microchip,ksz9897" }, - {}, -}; -MODULE_DEVICE_TABLE(of, ksz_dt_ids); - -static struct spi_driver ksz_spi_driver = { - .driver = { - .name = "ksz9477-switch", - .owner = THIS_MODULE, - .of_match_table = of_match_ptr(ksz_dt_ids), - }, - .probe = ksz_spi_probe, - .remove = ksz_spi_remove, -}; - -module_spi_driver(ksz_spi_driver); - -MODULE_AUTHOR("Woojung Huh <Woojung.Huh@microchip.com>"); -MODULE_DESCRIPTION("Microchip KSZ Series Switch SPI access Driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/dsa/microchip/ksz_spi.h b/drivers/net/dsa/microchip/ksz_spi.h new file mode 100644 index 000000000000..427811bd60b3 --- /dev/null +++ b/drivers/net/dsa/microchip/ksz_spi.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 + * Microchip KSZ series SPI access common header + * + * Copyright (C) 2017-2018 Microchip Technology Inc. 
+ * Tristram Ha <Tristram.Ha@microchip.com> + */ + +#ifndef __KSZ_SPI_H +#define __KSZ_SPI_H + +/* Chip-dependent SPI access */ +static int ksz_spi_read(struct ksz_device *dev, u32 reg, u8 *data, + unsigned int len); +static int ksz_spi_write(struct ksz_device *dev, u32 reg, void *data, + unsigned int len); + +static int ksz_spi_read8(struct ksz_device *dev, u32 reg, u8 *val) +{ + return ksz_spi_read(dev, reg, val, 1); +} + +static int ksz_spi_read16(struct ksz_device *dev, u32 reg, u16 *val) +{ + int ret = ksz_spi_read(dev, reg, (u8 *)val, 2); + + if (!ret) + *val = be16_to_cpu(*val); + + return ret; +} + +static int ksz_spi_read32(struct ksz_device *dev, u32 reg, u32 *val) +{ + int ret = ksz_spi_read(dev, reg, (u8 *)val, 4); + + if (!ret) + *val = be32_to_cpu(*val); + + return ret; +} + +static int ksz_spi_write8(struct ksz_device *dev, u32 reg, u8 value) +{ + return ksz_spi_write(dev, reg, &value, 1); +} + +static int ksz_spi_write16(struct ksz_device *dev, u32 reg, u16 value) +{ + value = cpu_to_be16(value); + return ksz_spi_write(dev, reg, &value, 2); +} + +static int ksz_spi_write32(struct ksz_device *dev, u32 reg, u32 value) +{ + value = cpu_to_be32(value); + return ksz_spi_write(dev, reg, &value, 4); +} + +static int ksz_spi_get(struct ksz_device *dev, u32 reg, void *data, size_t len) +{ + return ksz_spi_read(dev, reg, data, len); +} + +static int ksz_spi_set(struct ksz_device *dev, u32 reg, void *data, size_t len) +{ + return ksz_spi_write(dev, reg, data, len); +} + +#endif diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index a5de9bffe5be..74547f43b938 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -658,7 +658,8 @@ static void mt7530_adjust_link(struct dsa_switch *ds, int port, if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t( + phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 24fb6a685039..8a517d8fb9d1 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2524,11 +2524,22 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg) mutex_unlock(&chip->reg_lock); if (reg == MII_PHYSID2) { - /* Some internal PHYS don't have a model number. Use - * the mv88e6390 family model number instead. - */ - if (!(val & 0x3f0)) - val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; + /* Some internal PHYs don't have a model number. */ + if (chip->info->family != MV88E6XXX_FAMILY_6165) + /* Then there is the 6165 family. It gets its + * PHYs correct. But it can also have two + * SERDES interfaces in the PHY address + * space. And these don't have a model + * number. But they are not PHYs, so we don't + * want to give them something a PHY driver + * will recognise. + * + * Use the mv88e6390 family model number + * instead, for anything which really could be + * a PHY. + */ + if (!(val & 0x3f0)) + val |= MV88E6XXX_PORT_SWITCH_ID_PROD_6390 >> 4; } return err ?
err : val; @@ -3234,6 +3245,7 @@ static const struct mv88e6xxx_ops mv88e6190_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3276,6 +3288,7 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390x_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3291,8 +3304,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390x_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_irq_setup = mv88e6390x_serdes_irq_setup, + .serdes_irq_free = mv88e6390x_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .phylink_validate = mv88e6390x_phylink_validate, }; @@ -3318,6 +3331,7 @@ static const struct mv88e6xxx_ops mv88e6191_ops = { .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3405,11 +3419,11 @@ static const struct mv88e6xxx_ops mv88e6290_ops = { .port_set_egress_floods = mv88e6352_port_set_egress_floods, .port_set_ether_type = mv88e6351_port_set_ether_type, .port_pause_limit = mv88e6390_port_pause_limit, - .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3464,6 +3478,7 @@ static const struct mv88e6xxx_ops mv88e6320_ops = { .stats_get_stats = mv88e6320_stats_get_stats, .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .mgmt_rsvd2cpu = mv88e6352_g2_mgmt_rsvd2cpu, .pot_clear = mv88e6xxx_g2_pot_clear, .reset = mv88e6352_g1_reset, @@ -3506,6 +3521,7 @@ static const struct mv88e6xxx_ops mv88e6321_ops = { .stats_get_stats = mv88e6320_stats_get_stats, .set_cpu_port = mv88e6095_g1_set_cpu_port, .set_egress_port = mv88e6095_g1_set_egress_port, + .watchdog_ops = &mv88e6390_watchdog_ops, .reset = mv88e6352_g1_reset, .vtu_getnext = mv88e6185_g1_vtu_getnext, .vtu_loadpurge = mv88e6185_g1_vtu_loadpurge, @@ -3710,11 +3726,11 @@ static const struct mv88e6xxx_ops mv88e6390_ops = { .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6390_port_pause_limit, - .port_set_cmode = mv88e6390x_port_set_cmode, 
.port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3757,11 +3773,11 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .port_set_jumbo_size = mv88e6165_port_set_jumbo_size, .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting, .port_pause_limit = mv88e6390_port_pause_limit, - .port_set_cmode = mv88e6390x_port_set_cmode, .port_disable_learn_limit = mv88e6xxx_port_disable_learn_limit, .port_disable_pri_override = mv88e6xxx_port_disable_pri_override, .port_link_state = mv88e6352_port_link_state, .port_get_cmode = mv88e6352_port_get_cmode, + .port_set_cmode = mv88e6390x_port_set_cmode, .stats_snapshot = mv88e6390_g1_stats_snapshot, .stats_set_histogram = mv88e6390_g1_stats_set_histogram, .stats_get_sset_count = mv88e6320_stats_get_sset_count, @@ -3777,8 +3793,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = { .vtu_getnext = mv88e6390_g1_vtu_getnext, .vtu_loadpurge = mv88e6390_g1_vtu_loadpurge, .serdes_power = mv88e6390x_serdes_power, - .serdes_irq_setup = mv88e6390_serdes_irq_setup, - .serdes_irq_free = mv88e6390_serdes_irq_free, + .serdes_irq_setup = mv88e6390x_serdes_irq_setup, + .serdes_irq_free = mv88e6390x_serdes_irq_free, .gpio_ops = &mv88e6352_gpio_ops, .avb_ops = &mv88e6390_avb_ops, .ptp_ops = &mv88e6352_ptp_ops, diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c index cd7db60a508b..ebd26b6a93e6 100644 --- a/drivers/net/dsa/mv88e6xxx/port.c +++ b/drivers/net/dsa/mv88e6xxx/port.c @@ -368,12 +368,15 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, u16 reg; int err; - if (mode == PHY_INTERFACE_MODE_NA) - return 0; - if (port != 9 && port != 10) return -EOPNOTSUPP; + /* Default to a slow mode, so freeing up SERDES interfaces for + * other ports which might use them for SFPs. 
+ */ + if (mode == PHY_INTERFACE_MODE_NA) + mode = PHY_INTERFACE_MODE_1000BASEX; + switch (mode) { case PHY_INTERFACE_MODE_1000BASEX: cmode = MV88E6XXX_PORT_STS_CMODE_1000BASE_X; @@ -437,6 +440,21 @@ int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, return 0; } +int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode) +{ + switch (mode) { + case PHY_INTERFACE_MODE_XGMII: + case PHY_INTERFACE_MODE_XAUI: + case PHY_INTERFACE_MODE_RXAUI: + return -EINVAL; + default: + break; + } + + return mv88e6390x_port_set_cmode(chip, port, mode); +} + int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode) { int err; diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h index 36904c9bf955..0d81866d0e4a 100644 --- a/drivers/net/dsa/mv88e6xxx/port.h +++ b/drivers/net/dsa/mv88e6xxx/port.h @@ -310,6 +310,8 @@ int mv88e6097_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); int mv88e6390_port_pause_limit(struct mv88e6xxx_chip *chip, int port, u8 in, u8 out); +int mv88e6390_port_set_cmode(struct mv88e6xxx_chip *chip, int port, + phy_interface_t mode); int mv88e6390x_port_set_cmode(struct mv88e6xxx_chip *chip, int port, phy_interface_t mode); int mv88e6185_port_get_cmode(struct mv88e6xxx_chip *chip, int port, u8 *cmode); diff --git a/drivers/net/dsa/mv88e6xxx/serdes.c b/drivers/net/dsa/mv88e6xxx/serdes.c index bb69650ff772..2caa8c8b4b55 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.c +++ b/drivers/net/dsa/mv88e6xxx/serdes.c @@ -619,15 +619,11 @@ out: return ret; } -int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) { int lane; int err; - /* Only support ports 9 and 10 at the moment */ - if (port < 9) - return 0; - lane = mv88e6390x_serdes_get_lane(chip, port); if (lane == -ENODEV) @@ -663,11 +659,19 @@ int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) return mv88e6390_serdes_irq_enable(chip, port, lane); } -void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port) +{ + if (port < 9) + return 0; + + return mv88e6390_serdes_irq_setup(chip, port); +} + +void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) { int lane = mv88e6390x_serdes_get_lane(chip, port); - if (port < 9) + if (lane == -ENODEV) return; if (lane < 0) @@ -685,6 +689,14 @@ void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) chip->ports[port].serdes_irq = 0; } +void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port) +{ + if (port < 9) + return; + + mv88e6390x_serdes_irq_free(chip, port); +} + int mv88e6341_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on) { u8 cmode = chip->ports[port].cmode; diff --git a/drivers/net/dsa/mv88e6xxx/serdes.h b/drivers/net/dsa/mv88e6xxx/serdes.h index 7870c5a9ef12..573dce8b1eb4 100644 --- a/drivers/net/dsa/mv88e6xxx/serdes.h +++ b/drivers/net/dsa/mv88e6xxx/serdes.h @@ -77,6 +77,8 @@ int mv88e6390_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390x_serdes_power(struct mv88e6xxx_chip *chip, int port, bool on); int mv88e6390_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); void mv88e6390_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); +int mv88e6390x_serdes_irq_setup(struct mv88e6xxx_chip *chip, int port); +void mv88e6390x_serdes_irq_free(struct mv88e6xxx_chip *chip, int port); int 
mv88e6352_serdes_get_sset_count(struct mv88e6xxx_chip *chip, int port); int mv88e6352_serdes_get_strings(struct mv88e6xxx_chip *chip, int port, uint8_t *data); diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c index 5bc168314ea2..40f421dbdf57 100644 --- a/drivers/net/ethernet/3com/3c59x.c +++ b/drivers/net/ethernet/3com/3c59x.c @@ -1151,7 +1151,7 @@ static int vortex_probe1(struct device *gendev, void __iomem *ioaddr, int irq, print_info = (vortex_debug > 1); if (print_info) - pr_info("See Documentation/networking/vortex.txt\n"); + pr_info("See Documentation/networking/device_drivers/3com/vortex.txt\n"); pr_info("%s: 3Com %s %s at %p.\n", print_name, @@ -1956,7 +1956,7 @@ vortex_error(struct net_device *dev, int status) dev->name, tx_status); if (tx_status == 0x82) { pr_err("Probably a duplex mismatch. See " - "Documentation/networking/vortex.txt\n"); + "Documentation/networking/device_drivers/3com/vortex.txt\n"); } dump_tx_ring(dev); } diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig index 5c3ef9fc8207..0ac44ef1f7a9 100644 --- a/drivers/net/ethernet/3com/Kconfig +++ b/drivers/net/ethernet/3com/Kconfig @@ -75,8 +75,9 @@ config VORTEX "Hurricane" (3c555/3cSOHO) PCI If you have such a card, say Y here. More specific information is in - <file:Documentation/networking/vortex.txt> and in the comments at - the beginning of <file:drivers/net/ethernet/3com/3c59x.c>. + <file:Documentation/networking/device_drivers/3com/vortex.txt> and + in the comments at the beginning of + <file:drivers/net/ethernet/3com/3c59x.c>. To compile this support as a module, choose M here. diff --git a/drivers/net/ethernet/aeroflex/greth.c b/drivers/net/ethernet/aeroflex/greth.c index 7c9348a26cbb..91fc64c1145e 100644 --- a/drivers/net/ethernet/aeroflex/greth.c +++ b/drivers/net/ethernet/aeroflex/greth.c @@ -1283,7 +1283,7 @@ static int greth_mdio_probe(struct net_device *dev) else phy_set_max_speed(phy, SPEED_100); - phy->advertising = phy->supported; + linkmode_copy(phy->advertising, phy->supported); greth->link = 0; greth->speed = 0; diff --git a/drivers/net/ethernet/amd/au1000_eth.c b/drivers/net/ethernet/amd/au1000_eth.c index 7c1eb304c27e..e833d1b3fe18 100644 --- a/drivers/net/ethernet/amd/au1000_eth.c +++ b/drivers/net/ethernet/amd/au1000_eth.c @@ -940,11 +940,8 @@ static int au1000_open(struct net_device *dev) return retval; } - if (dev->phydev) { - /* cause the PHY state machine to schedule a link state check */ - dev->phydev->state = PHY_CHANGELINK; + if (dev->phydev) phy_start(dev->phydev); - } netif_start_queue(dev); diff --git a/drivers/net/ethernet/amd/sunlance.c b/drivers/net/ethernet/amd/sunlance.c index 9d4899826823..bd6589de93d9 100644 --- a/drivers/net/ethernet/amd/sunlance.c +++ b/drivers/net/ethernet/amd/sunlance.c @@ -1488,9 +1488,9 @@ static int sunlance_sbus_probe(struct platform_device *op) struct device_node *parent_dp = parent->dev.of_node; int err; - if (!strcmp(parent_dp->name, "ledma")) { + if (of_node_name_eq(parent_dp, "ledma")) { err = sparc_lance_probe_one(op, parent, NULL); - } else if (!strcmp(parent_dp->name, "lebuffer")) { + } else if (of_node_name_eq(parent_dp, "lebuffer")) { err = sparc_lance_probe_one(op, NULL, parent); } else err = sparc_lance_probe_one(op, NULL, NULL); diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 151bdb629e8a..128cd648ba99 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ 
-857,6 +857,7 @@ static void xgbe_phy_free_phy_device(struct xgbe_prv_data *pdata) static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; unsigned int phy_id = phy_data->phydev->phy_id; @@ -878,9 +879,15 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) phy_write(phy_data->phydev, 0x04, 0x0d01); phy_write(phy_data->phydev, 0x00, 0x9140); - phy_data->phydev->supported = PHY_10BT_FEATURES | - PHY_100BT_FEATURES | - PHY_1000BT_FEATURES; + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + supported); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + supported); + + linkmode_copy(phy_data->phydev->supported, supported); + phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, @@ -891,6 +898,7 @@ static bool xgbe_phy_finisar_phy_quirks(struct xgbe_prv_data *pdata) static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct xgbe_phy_data *phy_data = pdata->phy_data; struct xgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom; unsigned int phy_id = phy_data->phydev->phy_id; @@ -951,9 +959,13 @@ static bool xgbe_phy_belfuse_phy_quirks(struct xgbe_prv_data *pdata) reg = phy_read(phy_data->phydev, 0x00); phy_write(phy_data->phydev, 0x00, reg & ~0x00800); - phy_data->phydev->supported = (PHY_10BT_FEATURES | - PHY_100BT_FEATURES | - PHY_1000BT_FEATURES); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + supported); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + supported); + linkmode_copy(phy_data->phydev->supported, supported); phy_support_asym_pause(phy_data->phydev); netif_dbg(pdata, drv, pdata->netdev, @@ -976,7 +988,6 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; struct phy_device *phydev; - u32 advertising; int ret; /* If we already have a PHY, just return */ @@ -1036,9 +1047,8 @@ static int xgbe_phy_find_phy_device(struct xgbe_prv_data *pdata) xgbe_phy_external_phy_quirks(pdata); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - lks->link_modes.advertising); - phydev->advertising &= advertising; + linkmode_and(phydev->advertising, phydev->advertising, + lks->link_modes.advertising); phy_start_aneg(phy_data->phydev); @@ -1497,7 +1507,7 @@ static void xgbe_phy_phydev_flowctrl(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return; - lcl_adv = ethtool_adv_to_lcl_adv_t(phy_data->phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phy_data->phydev->advertising); if (phy_data->phydev->pause) { XGBE_SET_LP_ADV(lks, Pause); @@ -1815,7 +1825,6 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) { struct ethtool_link_ksettings *lks = &pdata->phy.lks; struct xgbe_phy_data *phy_data = pdata->phy_data; - u32 advertising; int ret; ret = xgbe_phy_find_phy_device(pdata); @@ -1825,12 +1834,10 @@ static int xgbe_phy_an_config(struct xgbe_prv_data *pdata) if (!phy_data->phydev) return 0; - ethtool_convert_link_mode_to_legacy_u32(&advertising, - lks->link_modes.advertising); - phy_data->phydev->autoneg = pdata->phy.autoneg; - phy_data->phydev->advertising = phy_data->phydev->supported & - advertising; + linkmode_and(phy_data->phydev->advertising, + 
phy_data->phydev->supported, + lks->link_modes.advertising); if (pdata->phy.autoneg != AUTONEG_ENABLE) { phy_data->phydev->speed = pdata->phy.speed; diff --git a/drivers/net/ethernet/apm/xgene-v2/mdio.c b/drivers/net/ethernet/apm/xgene-v2/mdio.c index f5fe3bb2e59d..53529cd85162 100644 --- a/drivers/net/ethernet/apm/xgene-v2/mdio.c +++ b/drivers/net/ethernet/apm/xgene-v2/mdio.c @@ -109,6 +109,7 @@ void xge_mdio_remove(struct net_device *ndev) int xge_mdio_config(struct net_device *ndev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct xge_pdata *pdata = netdev_priv(ndev); struct device *dev = &pdata->pdev->dev; struct mii_bus *mdio_bus; @@ -148,16 +149,17 @@ int xge_mdio_config(struct net_device *ndev) goto err; } - phydev->supported &= ~(SUPPORTED_10baseT_Half | - SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_1000baseT_Half | - SUPPORTED_AUI | - SUPPORTED_MII | - SUPPORTED_FIBRE | - SUPPORTED_BNC); - phydev->advertising = phydev->supported; + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_AUI_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_BNC_BIT, mask); + + linkmode_andnot(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); pdata->phy_speed = SPEED_UNKNOWN; return 0; diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile index 686f6d8c9e79..4556630ee286 100644 --- a/drivers/net/ethernet/aquantia/atlantic/Makefile +++ b/drivers/net/ethernet/aquantia/atlantic/Makefile @@ -36,6 +36,7 @@ atlantic-objs := aq_main.o \ aq_ring.o \ aq_hw_utils.o \ aq_ethtool.o \ + aq_filters.o \ hw_atl/hw_atl_a0.o \ hw_atl/hw_atl_b0.o \ hw_atl/hw_atl_utils.o \ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h index 91eb8910b1c9..3944ce7f0870 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h @@ -12,7 +12,7 @@ #ifndef AQ_CFG_H #define AQ_CFG_H -#define AQ_CFG_VECS_DEF 4U +#define AQ_CFG_VECS_DEF 8U #define AQ_CFG_TCS_DEF 1U #define AQ_CFG_TXDS_DEF 4096U @@ -42,8 +42,8 @@ #define AQ_CFG_IS_LRO_DEF 1U /* RSS */ -#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U -#define AQ_CFG_RSS_HASHKEY_SIZE 320U +#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 64U +#define AQ_CFG_RSS_HASHKEY_SIZE 40U #define AQ_CFG_IS_RSS_DEF 1U #define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h index becb578211ed..6b6d1724676e 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_common.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h @@ -14,7 +14,7 @@ #include <linux/etherdevice.h> #include <linux/pci.h> - +#include <linux/if_vlan.h> #include "ver.h" #include "aq_cfg.h" #include "aq_utils.h" diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c index 99ef1daaa4d8..38e87eed76b9 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c @@ -12,6 +12,7 @@ #include "aq_ethtool.h" #include "aq_nic.h" #include "aq_vec.h" +#include "aq_filters.h" static void 
aq_ethtool_get_regs(struct net_device *ndev, struct ethtool_regs *regs, void *p) @@ -201,6 +202,41 @@ static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key, return 0; } +static int aq_ethtool_set_rss(struct net_device *netdev, const u32 *indir, + const u8 *key, const u8 hfunc) +{ + struct aq_nic_s *aq_nic = netdev_priv(netdev); + struct aq_nic_cfg_s *cfg; + unsigned int i = 0U; + u32 rss_entries; + int err = 0; + + cfg = aq_nic_get_cfg(aq_nic); + rss_entries = cfg->aq_rss.indirection_table_size; + + /* We do not allow change in unsupported parameters */ + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP) + return -EOPNOTSUPP; + /* Fill out the redirection table */ + if (indir) + for (i = 0; i < rss_entries; i++) + cfg->aq_rss.indirection_table[i] = indir[i]; + + /* Fill out the rss hash key */ + if (key) { + memcpy(cfg->aq_rss.hash_secret_key, key, + sizeof(cfg->aq_rss.hash_secret_key)); + err = aq_nic->aq_hw_ops->hw_rss_hash_set(aq_nic->aq_hw, + &cfg->aq_rss); + if (err) + return err; + } + + err = aq_nic->aq_hw_ops->hw_rss_set(aq_nic->aq_hw, &cfg->aq_rss); + + return err; +} + static int aq_ethtool_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd, u32 *rule_locs) @@ -213,7 +249,36 @@ static int aq_ethtool_get_rxnfc(struct net_device *ndev, case ETHTOOL_GRXRINGS: cmd->data = cfg->vecs; break; + case ETHTOOL_GRXCLSRLCNT: + cmd->rule_cnt = aq_get_rxnfc_count_all_rules(aq_nic); + break; + case ETHTOOL_GRXCLSRULE: + err = aq_get_rxnfc_rule(aq_nic, cmd); + break; + case ETHTOOL_GRXCLSRLALL: + err = aq_get_rxnfc_all_rules(aq_nic, cmd, rule_locs); + break; + default: + err = -EOPNOTSUPP; + break; + } + + return err; +} + +static int aq_ethtool_set_rxnfc(struct net_device *ndev, + struct ethtool_rxnfc *cmd) +{ + int err = 0; + struct aq_nic_s *aq_nic = netdev_priv(ndev); + switch (cmd->cmd) { + case ETHTOOL_SRXCLSRLINS: + err = aq_add_rxnfc_rule(aq_nic, cmd); + break; + case ETHTOOL_SRXCLSRLDEL: + err = aq_del_rxnfc_rule(aq_nic, cmd); + break; default: err = -EOPNOTSUPP; break; @@ -495,7 +560,7 @@ static int aq_set_ringparam(struct net_device *ndev, } } if (ndev_running) - err = dev_open(ndev); + err = dev_open(ndev, NULL); err_exit: return err; @@ -519,7 +584,9 @@ const struct ethtool_ops aq_ethtool_ops = { .set_pauseparam = aq_ethtool_set_pauseparam, .get_rxfh_key_size = aq_ethtool_get_rss_key_size, .get_rxfh = aq_ethtool_get_rss, + .set_rxfh = aq_ethtool_set_rss, .get_rxnfc = aq_ethtool_get_rxnfc, + .set_rxnfc = aq_ethtool_set_rxnfc, .get_sset_count = aq_ethtool_get_sset_count, .get_ethtool_stats = aq_ethtool_stats, .get_link_ksettings = aq_ethtool_get_link_ksettings, diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c new file mode 100644 index 000000000000..18bc035da850 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c @@ -0,0 +1,876 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File aq_filters.c: RX filters related functions. 
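Before the definitions below, a hedged sketch of the request shape the new set_rxnfc hook receives; the values are hypothetical and assume an aq_nic pointer and an int err in scope:

    struct ethtool_rxnfc cmd = { .cmd = ETHTOOL_SRXCLSRLINS };

    cmd.fs.flow_type = UDP_V4_FLOW;
    cmd.fs.h_u.udp_ip4_spec.pdst = cpu_to_be16(4321);
    cmd.fs.m_u.udp_ip4_spec.pdst = cpu_to_be16(0xffff);
    cmd.fs.ring_cookie = 2;   /* deliver to RX queue 2 */
    cmd.fs.location = 32;     /* AQ_RX_FIRST_LOC_FL3L4, see aq_hw.h below */
    err = aq_add_rxnfc_rule(aq_nic, &cmd);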
*/ + +#include "aq_filters.h" + +static bool __must_check +aq_rule_is_approve(struct ethtool_rx_flow_spec *fsp) +{ + if (fsp->flow_type & FLOW_MAC_EXT) + return false; + + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV4_FLOW: + case IPV6_FLOW: + return true; + case IP_USER_FLOW: + switch (fsp->h_u.usr_ip4_spec.proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + case IPPROTO_SCTP: + case IPPROTO_IP: + return true; + default: + return false; + } + case IPV6_USER_FLOW: + switch (fsp->h_u.usr_ip6_spec.l4_proto) { + case IPPROTO_TCP: + case IPPROTO_UDP: + case IPPROTO_SCTP: + case IPPROTO_IP: + return true; + default: + return false; + } + default: + return false; + } + + return false; +} + +static bool __must_check +aq_match_filter(struct ethtool_rx_flow_spec *fsp1, + struct ethtool_rx_flow_spec *fsp2) +{ + if (fsp1->flow_type != fsp2->flow_type || + memcmp(&fsp1->h_u, &fsp2->h_u, sizeof(fsp2->h_u)) || + memcmp(&fsp1->h_ext, &fsp2->h_ext, sizeof(fsp2->h_ext)) || + memcmp(&fsp1->m_u, &fsp2->m_u, sizeof(fsp2->m_u)) || + memcmp(&fsp1->m_ext, &fsp2->m_ext, sizeof(fsp2->m_ext))) + return false; + + return true; +} + +static bool __must_check +aq_rule_already_exists(struct aq_nic_s *aq_nic, + struct ethtool_rx_flow_spec *fsp) +{ + struct aq_rx_filter *rule; + struct hlist_node *aq_node2; + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + if (rule->aq_fsp.location == fsp->location) + continue; + if (aq_match_filter(&rule->aq_fsp, fsp)) { + netdev_err(aq_nic->ndev, + "ethtool: This filter is already set\n"); + return true; + } + } + + return false; +} + +static int aq_check_approve_fl3l4(struct aq_nic_s *aq_nic, + struct aq_hw_rx_fltrs_s *rx_fltrs, + struct ethtool_rx_flow_spec *fsp) +{ + if (fsp->location < AQ_RX_FIRST_LOC_FL3L4 || + fsp->location > AQ_RX_LAST_LOC_FL3L4) { + netdev_err(aq_nic->ndev, + "ethtool: location must be in range [%d, %d]", + AQ_RX_FIRST_LOC_FL3L4, + AQ_RX_LAST_LOC_FL3L4); + return -EINVAL; + } + if (rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv4) { + rx_fltrs->fl3l4.is_ipv6 = false; + netdev_err(aq_nic->ndev, + "ethtool: mixing ipv4 and ipv6 is not allowed"); + return -EINVAL; + } else if (!rx_fltrs->fl3l4.is_ipv6 && rx_fltrs->fl3l4.active_ipv6) { + rx_fltrs->fl3l4.is_ipv6 = true; + netdev_err(aq_nic->ndev, + "ethtool: mixing ipv4 and ipv6 is not allowed"); + return -EINVAL; + } else if (rx_fltrs->fl3l4.is_ipv6 && + fsp->location != AQ_RX_FIRST_LOC_FL3L4 + 4 && + fsp->location != AQ_RX_FIRST_LOC_FL3L4) { + netdev_err(aq_nic->ndev, + "ethtool: The specified location for ipv6 must be %d or %d", + AQ_RX_FIRST_LOC_FL3L4, AQ_RX_FIRST_LOC_FL3L4 + 4); + return -EINVAL; + } + + return 0; +} + +static int __must_check +aq_check_approve_fl2(struct aq_nic_s *aq_nic, + struct aq_hw_rx_fltrs_s *rx_fltrs, + struct ethtool_rx_flow_spec *fsp) +{ + if (fsp->location < AQ_RX_FIRST_LOC_FETHERT || + fsp->location > AQ_RX_LAST_LOC_FETHERT) { + netdev_err(aq_nic->ndev, + "ethtool: location must be in range [%d, %d]", + AQ_RX_FIRST_LOC_FETHERT, + AQ_RX_LAST_LOC_FETHERT); + return -EINVAL; + } + + if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK && + fsp->m_u.ether_spec.h_proto == 0U) { + netdev_err(aq_nic->ndev, + "ethtool: proto (ether_type) parameter must be specified"); + return -EINVAL; + } + + return 0; +} + +static int __must_check 
+aq_check_approve_fvlan(struct aq_nic_s *aq_nic, + struct aq_hw_rx_fltrs_s *rx_fltrs, + struct ethtool_rx_flow_spec *fsp) +{ + if (fsp->location < AQ_RX_FIRST_LOC_FVLANID || + fsp->location > AQ_RX_LAST_LOC_FVLANID) { + netdev_err(aq_nic->ndev, + "ethtool: location must be in range [%d, %d]", + AQ_RX_FIRST_LOC_FVLANID, + AQ_RX_LAST_LOC_FVLANID); + return -EINVAL; + } + + if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) && + (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci), + aq_nic->active_vlans))) { + netdev_err(aq_nic->ndev, + "ethtool: unknown vlan-id specified"); + return -EINVAL; + } + + if (fsp->ring_cookie > aq_nic->aq_nic_cfg.num_rss_queues) { + netdev_err(aq_nic->ndev, + "ethtool: queue number must be in range [0, %d]", + aq_nic->aq_nic_cfg.num_rss_queues - 1); + return -EINVAL; + } + return 0; +} + +static int __must_check +aq_check_filter(struct aq_nic_s *aq_nic, + struct ethtool_rx_flow_spec *fsp) +{ + int err = 0; + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + + if (fsp->flow_type & FLOW_EXT) { + if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_VID_MASK) { + err = aq_check_approve_fvlan(aq_nic, rx_fltrs, fsp); + } else if (be16_to_cpu(fsp->m_ext.vlan_tci) == VLAN_PRIO_MASK) { + err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp); + } else { + netdev_err(aq_nic->ndev, + "ethtool: invalid vlan mask 0x%x specified", + be16_to_cpu(fsp->m_ext.vlan_tci)); + err = -EINVAL; + } + } else { + switch (fsp->flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + err = aq_check_approve_fl2(aq_nic, rx_fltrs, fsp); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case IPV4_FLOW: + case IP_USER_FLOW: + rx_fltrs->fl3l4.is_ipv6 = false; + err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp); + break; + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV6_FLOW: + case IPV6_USER_FLOW: + rx_fltrs->fl3l4.is_ipv6 = true; + err = aq_check_approve_fl3l4(aq_nic, rx_fltrs, fsp); + break; + default: + netdev_err(aq_nic->ndev, + "ethtool: unknown flow-type specified"); + err = -EINVAL; + } + } + + return err; +} + +static bool __must_check +aq_rule_is_not_support(struct aq_nic_s *aq_nic, + struct ethtool_rx_flow_spec *fsp) +{ + bool rule_is_not_support = false; + + if (!(aq_nic->ndev->features & NETIF_F_NTUPLE)) { + netdev_err(aq_nic->ndev, + "ethtool: Please enable the RX flow control first:\n" + "ethtool -K %s ntuple on\n", aq_nic->ndev->name); + rule_is_not_support = true; + } else if (!aq_rule_is_approve(fsp)) { + netdev_err(aq_nic->ndev, + "ethtool: The specified flow type is not supported\n"); + rule_is_not_support = true; + } else if ((fsp->flow_type & ~FLOW_EXT) != ETHER_FLOW && + (fsp->h_u.tcp_ip4_spec.tos || + fsp->h_u.tcp_ip6_spec.tclass)) { + netdev_err(aq_nic->ndev, + "ethtool: The specified tos/tclass is not supported\n"); + rule_is_not_support = true; + } else if (fsp->flow_type & FLOW_MAC_EXT) { + netdev_err(aq_nic->ndev, + "ethtool: MAC_EXT is not supported"); + rule_is_not_support = true; + } + + return rule_is_not_support; +} + +static bool __must_check +aq_rule_is_not_correct(struct aq_nic_s *aq_nic, + struct ethtool_rx_flow_spec *fsp) +{ + bool rule_is_not_correct = false; + + if (!aq_nic) { + rule_is_not_correct = true; + } else if (fsp->location > AQ_RX_MAX_RXNFC_LOC) { + netdev_err(aq_nic->ndev, + "ethtool: The specified rule number %u is invalid\n", + fsp->location); + rule_is_not_correct = true; + } else if (aq_check_filter(aq_nic, fsp)) { + rule_is_not_correct = true; + } else if (fsp->ring_cookie != RX_CLS_FLOW_DISC) { + if
(fsp->ring_cookie >= aq_nic->aq_nic_cfg.num_rss_queues) { + netdev_err(aq_nic->ndev, + "ethtool: The specified action is invalid.\n" + "Maximum allowable action value is %u.\n", + aq_nic->aq_nic_cfg.num_rss_queues - 1); + rule_is_not_correct = true; + } + } + + return rule_is_not_correct; +} + +static int __must_check +aq_check_rule(struct aq_nic_s *aq_nic, + struct ethtool_rx_flow_spec *fsp) +{ + int err = 0; + + if (aq_rule_is_not_correct(aq_nic, fsp)) + err = -EINVAL; + else if (aq_rule_is_not_support(aq_nic, fsp)) + err = -EOPNOTSUPP; + else if (aq_rule_already_exists(aq_nic, fsp)) + err = -EEXIST; + + return err; +} + +static void aq_set_data_fl2(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, + struct aq_rx_filter_l2 *data, bool add) +{ + const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp; + + memset(data, 0, sizeof(*data)); + + data->location = fsp->location - AQ_RX_FIRST_LOC_FETHERT; + + if (fsp->ring_cookie != RX_CLS_FLOW_DISC) + data->queue = fsp->ring_cookie; + else + data->queue = -1; + + data->ethertype = be16_to_cpu(fsp->h_u.ether_spec.h_proto); + data->user_priority_en = be16_to_cpu(fsp->m_ext.vlan_tci) + == VLAN_PRIO_MASK; + data->user_priority = (be16_to_cpu(fsp->h_ext.vlan_tci) + & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; +} + +static int aq_add_del_fether(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, bool add) +{ + struct aq_rx_filter_l2 data; + struct aq_hw_s *aq_hw = aq_nic->aq_hw; + const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops; + + aq_set_data_fl2(aq_nic, aq_rx_fltr, &data, add); + + if (unlikely(!aq_hw_ops->hw_filter_l2_set)) + return -EOPNOTSUPP; + if (unlikely(!aq_hw_ops->hw_filter_l2_clear)) + return -EOPNOTSUPP; + + if (add) + return aq_hw_ops->hw_filter_l2_set(aq_hw, &data); + else + return aq_hw_ops->hw_filter_l2_clear(aq_hw, &data); +} + +static bool aq_fvlan_is_busy(struct aq_rx_filter_vlan *aq_vlans, int vlan) +{ + int i; + + for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) { + if (aq_vlans[i].enable && + aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED && + aq_vlans[i].vlan_id == vlan) { + return true; + } + } + + return false; +} + +/* Function rebuilds the array of vlan filters so that filters with an assigned + * queue take precedence over plain vlans on the interface.
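An illustrative run of that rebuild, with made-up values; aq_vlans and active_vlans have the same types as in the function below:

    /* slot 0 was pinned through ethtool (vlan 100 to queue 3): kept as-is */
    aq_vlans[0].enable = 1U;
    aq_vlans[0].vlan_id = 100;
    aq_vlans[0].queue = 3;

    set_bit(5, active_vlans);   /* bare vid, no queue assignment */
    aq_fvlan_rebuild(aq_nic, active_vlans, aq_vlans);
    /* vid 5 now occupies the first free slot with
     * queue == AQ_RX_QUEUE_NOT_ASSIGNED */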
+ */ +static void aq_fvlan_rebuild(struct aq_nic_s *aq_nic, + unsigned long *active_vlans, + struct aq_rx_filter_vlan *aq_vlans) +{ + bool vlan_busy = false; + int vlan = -1; + int i; + + for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) { + if (aq_vlans[i].enable && + aq_vlans[i].queue != AQ_RX_QUEUE_NOT_ASSIGNED) + continue; + do { + vlan = find_next_bit(active_vlans, + VLAN_N_VID, + vlan + 1); + if (vlan == VLAN_N_VID) { + aq_vlans[i].enable = 0U; + aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED; + aq_vlans[i].vlan_id = 0; + continue; + } + + vlan_busy = aq_fvlan_is_busy(aq_vlans, vlan); + if (!vlan_busy) { + aq_vlans[i].enable = 1U; + aq_vlans[i].queue = AQ_RX_QUEUE_NOT_ASSIGNED; + aq_vlans[i].vlan_id = vlan; + } + } while (vlan_busy && vlan != VLAN_N_VID); + } +} + +static int aq_set_data_fvlan(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, + struct aq_rx_filter_vlan *aq_vlans, bool add) +{ + const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp; + int location = fsp->location - AQ_RX_FIRST_LOC_FVLANID; + int i; + + memset(&aq_vlans[location], 0, sizeof(aq_vlans[location])); + + if (!add) + return 0; + + /* remove vlan if it was in table without queue assignment */ + for (i = 0; i < AQ_VLAN_MAX_FILTERS; ++i) { + if (aq_vlans[i].vlan_id == + (be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK)) { + aq_vlans[i].enable = false; + } + } + + aq_vlans[location].location = location; + aq_vlans[location].vlan_id = be16_to_cpu(fsp->h_ext.vlan_tci) + & VLAN_VID_MASK; + aq_vlans[location].queue = fsp->ring_cookie & 0x1FU; + aq_vlans[location].enable = 1U; + + return 0; +} + +int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct aq_rx_filter *rule = NULL; + struct hlist_node *aq_node2; + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + if (be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) + break; + } + if (rule && be16_to_cpu(rule->aq_fsp.h_ext.vlan_tci) == vlan_id) { + struct ethtool_rxnfc cmd; + + cmd.fs.location = rule->aq_fsp.location; + return aq_del_rxnfc_rule(aq_nic, &cmd); + } + + return -ENOENT; +} + +static int aq_add_del_fvlan(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, bool add) +{ + const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops; + + if (unlikely(!aq_hw_ops->hw_filter_vlan_set)) + return -EOPNOTSUPP; + + aq_set_data_fvlan(aq_nic, + aq_rx_fltr, + aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans, + add); + + return aq_filters_vlans_update(aq_nic); +} + +static int aq_set_data_fl3l4(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, + struct aq_rx_filter_l3l4 *data, bool add) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + const struct ethtool_rx_flow_spec *fsp = &aq_rx_fltr->aq_fsp; + + memset(data, 0, sizeof(*data)); + + data->is_ipv6 = rx_fltrs->fl3l4.is_ipv6; + data->location = HW_ATL_GET_REG_LOCATION_FL3L4(fsp->location); + + if (!add) { + if (!data->is_ipv6) + rx_fltrs->fl3l4.active_ipv4 &= ~BIT(data->location); + else + rx_fltrs->fl3l4.active_ipv6 &= + ~BIT((data->location) / 4); + + return 0; + } + + data->cmd |= HW_ATL_RX_ENABLE_FLTR_L3L4; + + switch (fsp->flow_type) { + case TCP_V4_FLOW: + case TCP_V6_FLOW: + data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4; + break; + case UDP_V4_FLOW: + case UDP_V6_FLOW: + data->cmd |= HW_ATL_RX_UDP; + data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4; + break; + case SCTP_V4_FLOW: + case SCTP_V6_FLOW: + data->cmd |= HW_ATL_RX_SCTP; + data->cmd |= HW_ATL_RX_ENABLE_CMP_PROT_L4; + 
break; + default: + break; + } + + if (!data->is_ipv6) { + data->ip_src[0] = + ntohl(fsp->h_u.tcp_ip4_spec.ip4src); + data->ip_dst[0] = + ntohl(fsp->h_u.tcp_ip4_spec.ip4dst); + rx_fltrs->fl3l4.active_ipv4 |= BIT(data->location); + } else { + int i; + + rx_fltrs->fl3l4.active_ipv6 |= BIT((data->location) / 4); + for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) { + data->ip_dst[i] = + ntohl(fsp->h_u.tcp_ip6_spec.ip6dst[i]); + data->ip_src[i] = + ntohl(fsp->h_u.tcp_ip6_spec.ip6src[i]); + } + data->cmd |= HW_ATL_RX_ENABLE_L3_IPV6; + } + if (fsp->flow_type != IP_USER_FLOW && + fsp->flow_type != IPV6_USER_FLOW) { + if (!data->is_ipv6) { + data->p_dst = + ntohs(fsp->h_u.tcp_ip4_spec.pdst); + data->p_src = + ntohs(fsp->h_u.tcp_ip4_spec.psrc); + } else { + data->p_dst = + ntohs(fsp->h_u.tcp_ip6_spec.pdst); + data->p_src = + ntohs(fsp->h_u.tcp_ip6_spec.psrc); + } + } + if (data->ip_src[0] && !data->is_ipv6) + data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3; + if (data->ip_dst[0] && !data->is_ipv6) + data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3; + if (data->p_dst) + data->cmd |= HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4; + if (data->p_src) + data->cmd |= HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4; + if (fsp->ring_cookie != RX_CLS_FLOW_DISC) { + data->cmd |= HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT; + data->cmd |= fsp->ring_cookie << HW_ATL_RX_QUEUE_FL3L4_SHIFT; + data->cmd |= HW_ATL_RX_ENABLE_QUEUE_L3L4; + } else { + data->cmd |= HW_ATL_RX_DISCARD << HW_ATL_RX_ACTION_FL3F4_SHIFT; + } + + return 0; +} + +static int aq_set_fl3l4(struct aq_hw_s *aq_hw, + const struct aq_hw_ops *aq_hw_ops, + struct aq_rx_filter_l3l4 *data) +{ + if (unlikely(!aq_hw_ops->hw_filter_l3l4_set)) + return -EOPNOTSUPP; + + return aq_hw_ops->hw_filter_l3l4_set(aq_hw, data); +} + +static int aq_add_del_fl3l4(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, bool add) +{ + const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops; + struct aq_hw_s *aq_hw = aq_nic->aq_hw; + struct aq_rx_filter_l3l4 data; + + if (unlikely(aq_rx_fltr->aq_fsp.location < AQ_RX_FIRST_LOC_FL3L4 || + aq_rx_fltr->aq_fsp.location > AQ_RX_LAST_LOC_FL3L4 || + aq_set_data_fl3l4(aq_nic, aq_rx_fltr, &data, add))) + return -EINVAL; + + return aq_set_fl3l4(aq_hw, aq_hw_ops, &data); +} + +static int aq_add_del_rule(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, bool add) +{ + int err = -EINVAL; + + if (aq_rx_fltr->aq_fsp.flow_type & FLOW_EXT) { + if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci) + == VLAN_VID_MASK) { + aq_rx_fltr->type = aq_rx_filter_vlan; + err = aq_add_del_fvlan(aq_nic, aq_rx_fltr, add); + } else if (be16_to_cpu(aq_rx_fltr->aq_fsp.m_ext.vlan_tci) + == VLAN_PRIO_MASK) { + aq_rx_fltr->type = aq_rx_filter_ethertype; + err = aq_add_del_fether(aq_nic, aq_rx_fltr, add); + } + } else { + switch (aq_rx_fltr->aq_fsp.flow_type & ~FLOW_EXT) { + case ETHER_FLOW: + aq_rx_fltr->type = aq_rx_filter_ethertype; + err = aq_add_del_fether(aq_nic, aq_rx_fltr, add); + break; + case TCP_V4_FLOW: + case UDP_V4_FLOW: + case SCTP_V4_FLOW: + case IP_USER_FLOW: + case TCP_V6_FLOW: + case UDP_V6_FLOW: + case SCTP_V6_FLOW: + case IPV6_USER_FLOW: + aq_rx_fltr->type = aq_rx_filter_l3l4; + err = aq_add_del_fl3l4(aq_nic, aq_rx_fltr, add); + break; + default: + err = -EINVAL; + break; + } + } + + return err; +} + +static int aq_update_table_filters(struct aq_nic_s *aq_nic, + struct aq_rx_filter *aq_rx_fltr, u16 index, + struct ethtool_rxnfc *cmd) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct aq_rx_filter *rule = NULL, *parent = NULL; + 
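For reference, a worked example of the command word the fl3l4 path above assembles (bit names from hw_atl_utils.h further down; a sketch, not hardware documentation). A TCP/IPv4 rule matching only destination port 80 and steered to queue 2 comes out as:

    u32 cmd = HW_ATL_RX_ENABLE_FLTR_L3L4 |
              HW_ATL_RX_ENABLE_CMP_PROT_L4 |   /* no proto bits: HW_ATL_RX_TCP == 0 */
              HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 |
              HW_ATL_RX_ENABLE_QUEUE_L3L4 |
              (HW_ATL_RX_HOST << HW_ATL_RX_ACTION_FL3F4_SHIFT) |
              (2U << HW_ATL_RX_QUEUE_FL3L4_SHIFT);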
struct hlist_node *aq_node2; + int err = -EINVAL; + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + if (rule->aq_fsp.location >= index) + break; + parent = rule; + } + + if (rule && rule->aq_fsp.location == index) { + err = aq_add_del_rule(aq_nic, rule, false); + hlist_del(&rule->aq_node); + kfree(rule); + --rx_fltrs->active_filters; + } + + if (unlikely(!aq_rx_fltr)) + return err; + + INIT_HLIST_NODE(&aq_rx_fltr->aq_node); + + if (parent) + hlist_add_behind(&aq_rx_fltr->aq_node, &parent->aq_node); + else + hlist_add_head(&aq_rx_fltr->aq_node, &rx_fltrs->filter_list); + + ++rx_fltrs->active_filters; + + return 0; +} + +u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + + return rx_fltrs->active_filters; +} + +struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic) +{ + return &aq_nic->aq_hw_rx_fltrs; +} + +int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct aq_rx_filter *aq_rx_fltr; + int err = 0; + + err = aq_check_rule(aq_nic, fsp); + if (err) + goto err_exit; + + aq_rx_fltr = kzalloc(sizeof(*aq_rx_fltr), GFP_KERNEL); + if (unlikely(!aq_rx_fltr)) { + err = -ENOMEM; + goto err_exit; + } + + memcpy(&aq_rx_fltr->aq_fsp, fsp, sizeof(*fsp)); + + err = aq_update_table_filters(aq_nic, aq_rx_fltr, fsp->location, NULL); + if (unlikely(err)) + goto err_free; + + err = aq_add_del_rule(aq_nic, aq_rx_fltr, true); + if (unlikely(err)) { + hlist_del(&aq_rx_fltr->aq_node); + --rx_fltrs->active_filters; + goto err_free; + } + + return 0; + +err_free: + kfree(aq_rx_fltr); +err_exit: + return err; +} + +int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct aq_rx_filter *rule = NULL; + struct hlist_node *aq_node2; + int err = -EINVAL; + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + if (rule->aq_fsp.location == cmd->fs.location) + break; + } + + if (rule && rule->aq_fsp.location == cmd->fs.location) { + err = aq_add_del_rule(aq_nic, rule, false); + hlist_del(&rule->aq_node); + kfree(rule); + --rx_fltrs->active_filters; + } + return err; +} + +int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct ethtool_rx_flow_spec *fsp = + (struct ethtool_rx_flow_spec *)&cmd->fs; + struct aq_rx_filter *rule = NULL; + struct hlist_node *aq_node2; + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) + if (fsp->location <= rule->aq_fsp.location) + break; + + if (unlikely(!rule || fsp->location != rule->aq_fsp.location)) + return -EINVAL; + + memcpy(fsp, &rule->aq_fsp, sizeof(*fsp)); + + return 0; +} + +int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd, + u32 *rule_locs) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct hlist_node *aq_node2; + struct aq_rx_filter *rule; + int count = 0; + + cmd->data = aq_get_rxnfc_count_all_rules(aq_nic); + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + if (unlikely(count == cmd->rule_cnt)) + return -EMSGSIZE; + + rule_locs[count++] = rule->aq_fsp.location; + } + + cmd->rule_cnt = count; + + return 0; +} + +int 
aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct hlist_node *aq_node2; + struct aq_rx_filter *rule; + int err = 0; + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + err = aq_add_del_rule(aq_nic, rule, false); + if (err) + goto err_exit; + hlist_del(&rule->aq_node); + kfree(rule); + --rx_fltrs->active_filters; + } + +err_exit: + return err; +} + +int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic) +{ + struct aq_hw_rx_fltrs_s *rx_fltrs = aq_get_hw_rx_fltrs(aq_nic); + struct hlist_node *aq_node2; + struct aq_rx_filter *rule; + int err = 0; + + hlist_for_each_entry_safe(rule, aq_node2, + &rx_fltrs->filter_list, aq_node) { + err = aq_add_del_rule(aq_nic, rule, true); + if (err) + goto err_exit; + } + +err_exit: + return err; +} + +int aq_filters_vlans_update(struct aq_nic_s *aq_nic) +{ + const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops; + struct aq_hw_s *aq_hw = aq_nic->aq_hw; + int hweight = 0; + int err = 0; + int i; + + if (unlikely(!aq_hw_ops->hw_filter_vlan_set)) + return -EOPNOTSUPP; + if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl)) + return -EOPNOTSUPP; + + aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans, + aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans); + + if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + for (i = 0; i < BITS_TO_LONGS(VLAN_N_VID); i++) + hweight += hweight_long(aq_nic->active_vlans[i]); + + err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false); + if (err) + return err; + } + + err = aq_hw_ops->hw_filter_vlan_set(aq_hw, + aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans + ); + if (err) + return err; + + if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (hweight < AQ_VLAN_MAX_FILTERS) + err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, true); + /* otherwise left in promiscuous mode */ + } + + return err; +} + +int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic) +{ + const struct aq_hw_ops *aq_hw_ops = aq_nic->aq_hw_ops; + struct aq_hw_s *aq_hw = aq_nic->aq_hw; + int err = 0; + + memset(aq_nic->active_vlans, 0, sizeof(aq_nic->active_vlans)); + aq_fvlan_rebuild(aq_nic, aq_nic->active_vlans, + aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans); + + if (unlikely(!aq_hw_ops->hw_filter_vlan_set)) + return -EOPNOTSUPP; + if (unlikely(!aq_hw_ops->hw_filter_vlan_ctrl)) + return -EOPNOTSUPP; + + err = aq_hw_ops->hw_filter_vlan_ctrl(aq_hw, false); + if (err) + return err; + err = aq_hw_ops->hw_filter_vlan_set(aq_hw, + aq_nic->aq_hw_rx_fltrs.fl2.aq_vlans + ); + return err; +} diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.h b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h new file mode 100644 index 000000000000..c6a08c6585d5 --- /dev/null +++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* Copyright (C) 2014-2017 aQuantia Corporation. */ + +/* File aq_filters.h: RX filters related functions.
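In short: rxnfc rules enter through aq_add_rxnfc_rule()/aq_del_rxnfc_rule()/aq_get_rxnfc_rule(), the ndo_vlan_rx_add_vid/kill_vid callbacks feed aq_filters_vlans_update(), and aq_reapply_rxnfc_all_rules()/aq_clear_rxnfc_all_rules() restore or drop the whole set around device open and remove (see the aq_main.c and aq_pci_func.c hunks below).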
*/ + +#ifndef AQ_FILTERS_H +#define AQ_FILTERS_H + +#include "aq_nic.h" + +enum aq_rx_filter_type { + aq_rx_filter_ethertype, + aq_rx_filter_vlan, + aq_rx_filter_l3l4 +}; + +struct aq_rx_filter { + struct hlist_node aq_node; + enum aq_rx_filter_type type; + struct ethtool_rx_flow_spec aq_fsp; +}; + +u16 aq_get_rxnfc_count_all_rules(struct aq_nic_s *aq_nic); +struct aq_hw_rx_fltrs_s *aq_get_hw_rx_fltrs(struct aq_nic_s *aq_nic); +int aq_add_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd); +int aq_del_rxnfc_rule(struct aq_nic_s *aq_nic, const struct ethtool_rxnfc *cmd); +int aq_get_rxnfc_rule(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd); +int aq_get_rxnfc_all_rules(struct aq_nic_s *aq_nic, struct ethtool_rxnfc *cmd, + u32 *rule_locs); +int aq_del_fvlan_by_vlan(struct aq_nic_s *aq_nic, u16 vlan_id); +int aq_clear_rxnfc_all_rules(struct aq_nic_s *aq_nic); +int aq_reapply_rxnfc_all_rules(struct aq_nic_s *aq_nic); +int aq_filters_vlans_update(struct aq_nic_s *aq_nic); +int aq_filters_vlan_offload_off(struct aq_nic_s *aq_nic); + +#endif /* AQ_FILTERS_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h index a1e70da358ca..81aab73dc22f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h @@ -18,6 +18,17 @@ #include "aq_rss.h" #include "hw_atl/hw_atl_utils.h" +#define AQ_RX_FIRST_LOC_FVLANID 0U +#define AQ_RX_LAST_LOC_FVLANID 15U +#define AQ_RX_FIRST_LOC_FETHERT 16U +#define AQ_RX_LAST_LOC_FETHERT 31U +#define AQ_RX_FIRST_LOC_FL3L4 32U +#define AQ_RX_LAST_LOC_FL3L4 39U +#define AQ_RX_MAX_RXNFC_LOC AQ_RX_LAST_LOC_FL3L4 +#define AQ_VLAN_MAX_FILTERS \ + (AQ_RX_LAST_LOC_FVLANID - AQ_RX_FIRST_LOC_FVLANID + 1U) +#define AQ_RX_QUEUE_NOT_ASSIGNED 0xFFU + /* NIC H/W capabilities */ struct aq_hw_caps_s { u64 hw_features; @@ -130,6 +141,7 @@ struct aq_hw_s { struct aq_ring_s; struct aq_ring_param_s; struct sk_buff; +struct aq_rx_filter_l3l4; struct aq_hw_ops { @@ -183,6 +195,23 @@ struct aq_hw_ops { int (*hw_packet_filter_set)(struct aq_hw_s *self, unsigned int packet_filter); + int (*hw_filter_l3l4_set)(struct aq_hw_s *self, + struct aq_rx_filter_l3l4 *data); + + int (*hw_filter_l3l4_clear)(struct aq_hw_s *self, + struct aq_rx_filter_l3l4 *data); + + int (*hw_filter_l2_set)(struct aq_hw_s *self, + struct aq_rx_filter_l2 *data); + + int (*hw_filter_l2_clear)(struct aq_hw_s *self, + struct aq_rx_filter_l2 *data); + + int (*hw_filter_vlan_set)(struct aq_hw_s *self, + struct aq_rx_filter_vlan *aq_vlans); + + int (*hw_filter_vlan_ctrl)(struct aq_hw_s *self, bool enable); + int (*hw_multicast_list_set)(struct aq_hw_s *self, u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c index 7c07eef275eb..2a11c1eefd8f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c @@ -13,6 +13,7 @@ #include "aq_nic.h" #include "aq_pci_func.h" #include "aq_ethtool.h" +#include "aq_filters.h" #include <linux/netdevice.h> #include <linux/module.h> @@ -49,6 +50,11 @@ static int aq_ndev_open(struct net_device *ndev) err = aq_nic_init(aq_nic); if (err < 0) goto err_exit; + + err = aq_reapply_rxnfc_all_rules(aq_nic); + if (err < 0) + goto err_exit; + err = aq_nic_start(aq_nic); if (err < 0) goto err_exit; @@ -101,6 +107,21 @@ static int aq_ndev_set_features(struct net_device *ndev, bool is_lro = false; int err = 0; + if 
(!(features & NETIF_F_NTUPLE)) { + if (aq_nic->ndev->features & NETIF_F_NTUPLE) { + err = aq_clear_rxnfc_all_rules(aq_nic); + if (unlikely(err)) + goto err_exit; + } + } + if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER)) { + if (aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) { + err = aq_filters_vlan_offload_off(aq_nic); + if (unlikely(err)) + goto err_exit; + } + } + aq_cfg->features = features; if (aq_cfg->aq_hw_caps->hw_features & NETIF_F_LRO) { @@ -119,6 +140,7 @@ static int aq_ndev_set_features(struct net_device *ndev, err = aq_nic->aq_hw_ops->hw_set_offload(aq_nic->aq_hw, aq_cfg); +err_exit: return err; } @@ -147,6 +169,35 @@ static void aq_ndev_set_multicast_settings(struct net_device *ndev) aq_nic_set_multicast_list(aq_nic, ndev); } +static int aq_ndo_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, + u16 vid) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + if (!aq_nic->aq_hw_ops->hw_filter_vlan_set) + return -EOPNOTSUPP; + + set_bit(vid, aq_nic->active_vlans); + + return aq_filters_vlans_update(aq_nic); +} + +static int aq_ndo_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, + u16 vid) +{ + struct aq_nic_s *aq_nic = netdev_priv(ndev); + + if (!aq_nic->aq_hw_ops->hw_filter_vlan_set) + return -EOPNOTSUPP; + + clear_bit(vid, aq_nic->active_vlans); + + if (-ENOENT == aq_del_fvlan_by_vlan(aq_nic, vid)) + return aq_filters_vlans_update(aq_nic); + + return 0; +} + static const struct net_device_ops aq_ndev_ops = { .ndo_open = aq_ndev_open, .ndo_stop = aq_ndev_close, @@ -154,5 +205,7 @@ static const struct net_device_ops aq_ndev_ops = { .ndo_set_rx_mode = aq_ndev_set_multicast_settings, .ndo_change_mtu = aq_ndev_change_mtu, .ndo_set_mac_address = aq_ndev_set_mac_address, - .ndo_set_features = aq_ndev_set_features + .ndo_set_features = aq_ndev_set_features, + .ndo_vlan_rx_add_vid = aq_ndo_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = aq_ndo_vlan_rx_kill_vid, }; diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c index 7abdc0952425..0147c037ca96 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c @@ -44,7 +44,7 @@ static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues) struct aq_rss_parameters *rss_params = &cfg->aq_rss; int i = 0; - static u8 rss_key[40] = { + static u8 rss_key[AQ_CFG_RSS_HASHKEY_SIZE] = { 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d, 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18, 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8, @@ -84,10 +84,6 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->is_lro = AQ_CFG_IS_LRO_DEF; - cfg->vlan_id = 0U; - - aq_nic_rss_init(self, cfg->num_rss_queues); - /*descriptors */ cfg->rxds = min(cfg->aq_hw_caps->rxds_max, AQ_CFG_RXDS_DEF); cfg->txds = min(cfg->aq_hw_caps->txds_max, AQ_CFG_TXDS_DEF); @@ -108,6 +104,8 @@ void aq_nic_cfg_start(struct aq_nic_s *self) cfg->num_rss_queues = min(cfg->vecs, AQ_CFG_NUM_RSS_QUEUES_DEF); + aq_nic_rss_init(self, cfg->num_rss_queues); + cfg->irq_type = aq_pci_func_get_irq_type(self); if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) || diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 44ec47a3d60a..8e34c1e49bf2 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -35,7 +35,6 @@ struct aq_nic_cfg_s { u32 mtu; u32 flow_control; u32 link_speed_msk; - u32 vlan_id; u32 wol; u16 is_mc_list_enabled; u16 mc_list_count; @@ -61,6 
+60,23 @@ struct aq_nic_cfg_s { #define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_)) +struct aq_hw_rx_fl2 { + struct aq_rx_filter_vlan aq_vlans[AQ_VLAN_MAX_FILTERS]; +}; + +struct aq_hw_rx_fl3l4 { + u8 active_ipv4; + u8 active_ipv6:2; + u8 is_ipv6; +}; + +struct aq_hw_rx_fltrs_s { + struct hlist_head filter_list; + u16 active_filters; + struct aq_hw_rx_fl2 fl2; + struct aq_hw_rx_fl3l4 fl3l4; +}; + struct aq_nic_s { atomic_t flags; struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX]; @@ -81,10 +97,13 @@ struct aq_nic_s { u32 count; u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN]; } mc_list; + /* Bitmask of currently assigned vlans from linux */ + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; struct pci_dev *pdev; unsigned int msix_entry_mask; u32 irqvecs; + struct aq_hw_rx_fltrs_s aq_hw_rx_fltrs; }; static inline struct device *aq_nic_get_dev(struct aq_nic_s *self) diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c index 1d5d6b8df855..c8b44cdb91c1 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c @@ -19,6 +19,7 @@ #include "aq_pci_func.h" #include "hw_atl/hw_atl_a0.h" #include "hw_atl/hw_atl_b0.h" +#include "aq_filters.h" static const struct pci_device_id aq_pci_tbl[] = { { PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), }, @@ -309,6 +310,7 @@ static void aq_pci_remove(struct pci_dev *pdev) struct aq_nic_s *self = pci_get_drvdata(pdev); if (self->ndev) { + aq_clear_rxnfc_all_rules(self); if (self->ndev->reg_state == NETREG_REGISTERED) unregister_netdev(self->ndev); aq_nic_free_vectors(self); diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c index a7e853fa43c2..b58ca7cb8e9d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c @@ -21,7 +21,7 @@ #define DEFAULT_B0_BOARD_BASIC_CAPABILITIES \ .is_64_dma = true, \ - .msix_irqs = 4U, \ + .msix_irqs = 8U, \ .irq_mask = ~0U, \ .vecs = HW_ATL_B0_RSS_MAX, \ .tcs = HW_ATL_B0_TC_MAX, \ @@ -41,7 +41,9 @@ NETIF_F_RXHASH | \ NETIF_F_SG | \ NETIF_F_TSO | \ - NETIF_F_LRO, \ + NETIF_F_LRO | \ + NETIF_F_NTUPLE | \ + NETIF_F_HW_VLAN_CTAG_FILTER, \ .hw_priv_flags = IFF_UNICAST_FLT, \ .flow_control = true, \ .mtu = HW_ATL_B0_MTU_JUMBO, \ @@ -319,20 +321,11 @@ static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self) hw_atl_rpf_vlan_outer_etht_set(self, 0x88A8U); hw_atl_rpf_vlan_inner_etht_set(self, 0x8100U); - if (cfg->vlan_id) { - hw_atl_rpf_vlan_flr_act_set(self, 1U, 0U); - hw_atl_rpf_vlan_id_flr_set(self, 0U, 0U); - hw_atl_rpf_vlan_flr_en_set(self, 0U, 0U); + hw_atl_rpf_vlan_prom_mode_en_set(self, 1); - hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U); - hw_atl_rpf_vlan_untagged_act_set(self, 1U); - - hw_atl_rpf_vlan_flr_act_set(self, 1U, 1U); - hw_atl_rpf_vlan_id_flr_set(self, cfg->vlan_id, 0U); - hw_atl_rpf_vlan_flr_en_set(self, 1U, 1U); - } else { - hw_atl_rpf_vlan_prom_mode_en_set(self, 1); - } + // Always accept untagged packets + hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U); + hw_atl_rpf_vlan_untagged_act_set(self, 1U); /* Rx Interrupts */ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); @@ -945,6 +938,142 @@ static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self, return aq_hw_err_from_flags(self); } +static int hw_atl_b0_hw_fl3l4_clear(struct aq_hw_s *self, + struct aq_rx_filter_l3l4 *data) +{ + u8 location = 
data->location; + + if (!data->is_ipv6) { + hw_atl_rpfl3l4_cmd_clear(self, location); + hw_atl_rpf_l4_spd_set(self, 0U, location); + hw_atl_rpf_l4_dpd_set(self, 0U, location); + hw_atl_rpfl3l4_ipv4_src_addr_clear(self, location); + hw_atl_rpfl3l4_ipv4_dest_addr_clear(self, location); + } else { + int i; + + for (i = 0; i < HW_ATL_RX_CNT_REG_ADDR_IPV6; ++i) { + hw_atl_rpfl3l4_cmd_clear(self, location + i); + hw_atl_rpf_l4_spd_set(self, 0U, location + i); + hw_atl_rpf_l4_dpd_set(self, 0U, location + i); + } + hw_atl_rpfl3l4_ipv6_src_addr_clear(self, location); + hw_atl_rpfl3l4_ipv6_dest_addr_clear(self, location); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_fl3l4_set(struct aq_hw_s *self, + struct aq_rx_filter_l3l4 *data) +{ + u8 location = data->location; + + hw_atl_b0_hw_fl3l4_clear(self, data); + + if (data->cmd) { + if (!data->is_ipv6) { + hw_atl_rpfl3l4_ipv4_dest_addr_set(self, + location, + data->ip_dst[0]); + hw_atl_rpfl3l4_ipv4_src_addr_set(self, + location, + data->ip_src[0]); + } else { + hw_atl_rpfl3l4_ipv6_dest_addr_set(self, + location, + data->ip_dst); + hw_atl_rpfl3l4_ipv6_src_addr_set(self, + location, + data->ip_src); + } + } + hw_atl_rpf_l4_dpd_set(self, data->p_dst, location); + hw_atl_rpf_l4_spd_set(self, data->p_src, location); + hw_atl_rpfl3l4_cmd_set(self, location, data->cmd); + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_fl2_set(struct aq_hw_s *self, + struct aq_rx_filter_l2 *data) +{ + hw_atl_rpf_etht_flr_en_set(self, 1U, data->location); + hw_atl_rpf_etht_flr_set(self, data->ethertype, data->location); + hw_atl_rpf_etht_user_priority_en_set(self, + !!data->user_priority_en, + data->location); + if (data->user_priority_en) + hw_atl_rpf_etht_user_priority_set(self, + data->user_priority, + data->location); + + if (data->queue < 0) { + hw_atl_rpf_etht_flr_act_set(self, 0U, data->location); + hw_atl_rpf_etht_rx_queue_en_set(self, 0U, data->location); + } else { + hw_atl_rpf_etht_flr_act_set(self, 1U, data->location); + hw_atl_rpf_etht_rx_queue_en_set(self, 1U, data->location); + hw_atl_rpf_etht_rx_queue_set(self, data->queue, data->location); + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_fl2_clear(struct aq_hw_s *self, + struct aq_rx_filter_l2 *data) +{ + hw_atl_rpf_etht_flr_en_set(self, 0U, data->location); + hw_atl_rpf_etht_flr_set(self, 0U, data->location); + hw_atl_rpf_etht_user_priority_en_set(self, 0U, data->location); + + return aq_hw_err_from_flags(self); +} + +/** + * @brief Set VLAN filter table + * @details Configure VLAN filter table to accept (and assign the queue) traffic + * for the particular vlan ids. 
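(A filter slot here is one of the AQ_VLAN_MAX_FILTERS entries; aq_vlans[i].queue == 0xFF, i.e. AQ_RX_QUEUE_NOT_ASSIGNED, means the vid is matched but no RX queue steering is programmed, as the queue != 0xFF check below shows.)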
+ * Note: use this function under vlan promisc mode so as not to lose traffic + * + * @param aq_hw_s + * @param aq_rx_filter_vlan VLAN filter configuration + * @return 0 - OK, <0 - error + */ +static int hw_atl_b0_hw_vlan_set(struct aq_hw_s *self, + struct aq_rx_filter_vlan *aq_vlans) +{ + int i; + + for (i = 0; i < AQ_VLAN_MAX_FILTERS; i++) { + hw_atl_rpf_vlan_flr_en_set(self, 0U, i); + hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i); + if (aq_vlans[i].enable) { + hw_atl_rpf_vlan_id_flr_set(self, + aq_vlans[i].vlan_id, + i); + hw_atl_rpf_vlan_flr_act_set(self, 1U, i); + hw_atl_rpf_vlan_flr_en_set(self, 1U, i); + if (aq_vlans[i].queue != 0xFF) { + hw_atl_rpf_vlan_rxq_flr_set(self, + aq_vlans[i].queue, + i); + hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i); + } + } + } + + return aq_hw_err_from_flags(self); +} + +static int hw_atl_b0_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) +{ + /* set promisc mode in case of disabling the vlan filter */ + hw_atl_rpf_vlan_prom_mode_en_set(self, !enable); + + return aq_hw_err_from_flags(self); +} + const struct aq_hw_ops hw_atl_ops_b0 = { .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl_b0_hw_init, @@ -969,6 +1098,11 @@ const struct aq_hw_ops hw_atl_ops_b0 = { .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init, .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init, .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set, + .hw_filter_l2_set = hw_atl_b0_hw_fl2_set, + .hw_filter_l2_clear = hw_atl_b0_hw_fl2_clear, + .hw_filter_l3l4_set = hw_atl_b0_hw_fl3l4_set, + .hw_filter_vlan_set = hw_atl_b0_hw_vlan_set, + .hw_filter_vlan_ctrl = hw_atl_b0_hw_vlan_ctrl, .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set, .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set, .hw_rss_set = hw_atl_b0_hw_rss_set, diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c index 5502ec5f0f69..939f77e2e117 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c @@ -898,6 +898,24 @@ void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, vlan_id_flr); } +void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter), + HW_ATL_RPF_VL_RXQ_EN_F_MSK, + HW_ATL_RPF_VL_RXQ_EN_F_SHIFT, + vlan_rxq_en); +} + +void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq, + u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_VL_RXQ_F_ADR(filter), + HW_ATL_RPF_VL_RXQ_F_MSK, + HW_ATL_RPF_VL_RXQ_F_SHIFT, + vlan_rxq); +} + void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter) { @@ -965,6 +983,20 @@ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter) HW_ATL_RPF_ET_VALF_SHIFT, etht_flr); } +void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_SPD_ADR(filter), + HW_ATL_RPF_L4_SPD_MSK, + HW_ATL_RPF_L4_SPD_SHIFT, val); +} + +void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter) +{ + aq_hw_write_reg_bit(aq_hw, HW_ATL_RPF_L4_DPD_ADR(filter), + HW_ATL_RPF_L4_DPD_MSK, + HW_ATL_RPF_L4_DPD_SHIFT, val); +} + /* RPO: rx packet offload */ void hw_atl_rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw, u32 ipv4header_crc_offload_en) @@ -1476,3 +1508,80 @@ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr) HW_ATL_MCP_UP_FORCE_INTERRUPT_SHIFT, up_force_intr); }
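A short sketch of the location arithmetic behind the new fl3l4 hooks (macro from hw_atl_utils.h below; the concrete numbers are only an illustration):

    /* user-visible rxnfc locations 32..39 map onto hardware L3/L4 slots 0..7 */
    u8 hw_loc = HW_ATL_GET_REG_LOCATION_FL3L4(35);   /* yields 3 */

An IPv6 rule consumes HW_ATL_RX_CNT_REG_ADDR_IPV6 (four) consecutive 32-bit address registers, which is why aq_check_approve_fl3l4() above only accepts locations 32 and 36 for IPv6 rules.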
+ +void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), 0U); +} + +void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_SRCA_ADR(location), 0U); +} + +void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), 0U); +} + +void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RPF_L3_DSTA_ADR(location + i), + 0U); +} + +void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RPF_L3_SRCA_ADR(location + i), + 0U); +} + +void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 ipv4_dest) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_DSTA_ADR(location), + ipv4_dest); +} + +void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 ipv4_src) +{ + aq_hw_write_reg(aq_hw, + HW_ATL_RPF_L3_SRCA_ADR(location), + ipv4_src); +} + +void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd) +{ + aq_hw_write_reg(aq_hw, HW_ATL_RPF_L3_REG_CTRL_ADR(location), cmd); +} + +void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 *ipv6_src) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RPF_L3_SRCA_ADR(location + i), + ipv6_src[i]); +} + +void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 *ipv6_dest) +{ + int i; + + for (i = 0; i < 4; ++i) + aq_hw_write_reg(aq_hw, + HW_ATL_RPF_L3_DSTA_ADR(location + i), + ipv6_dest[i]); +} diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h index 41f239928c15..03c570d115fe 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h @@ -441,6 +441,14 @@ void hw_atl_rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act, void hw_atl_rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter); +/* Set VLAN RX queue assignment enable */ +void hw_atl_rpf_vlan_rxq_en_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq_en, + u32 filter); + +/* Set VLAN RX queue */ +void hw_atl_rpf_vlan_rxq_flr_set(struct aq_hw_s *aq_hw, u32 vlan_rxq, + u32 filter); + /* set ethertype filter enable */ void hw_atl_rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter); @@ -475,6 +483,12 @@ void hw_atl_rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, /* set ethertype filter */ void hw_atl_rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter); +/* set L4 source port */ +void hw_atl_rpf_l4_spd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter); + +/* set L4 destination port */ +void hw_atl_rpf_l4_dpd_set(struct aq_hw_s *aq_hw, u32 val, u32 filter); + /* rpo */ /* set ipv4 header checksum offload enable */ @@ -704,4 +718,38 @@ void hw_atl_pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis); /* set uP Force Interrupt */ void hw_atl_mcp_up_force_intr_set(struct aq_hw_s *aq_hw, u32 up_force_intr); +/* clear ipv4 filter destination address */ +void hw_atl_rpfl3l4_ipv4_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location); + +/* clear ipv4 filter source address */ +void hw_atl_rpfl3l4_ipv4_src_addr_clear(struct aq_hw_s *aq_hw, u8 location); + +/* clear 
command for filter l3-l4 */ +void hw_atl_rpfl3l4_cmd_clear(struct aq_hw_s *aq_hw, u8 location); + +/* clear ipv6 filter destination address */ +void hw_atl_rpfl3l4_ipv6_dest_addr_clear(struct aq_hw_s *aq_hw, u8 location); + +/* clear ipv6 filter source address */ +void hw_atl_rpfl3l4_ipv6_src_addr_clear(struct aq_hw_s *aq_hw, u8 location); + +/* set ipv4 filter destination address */ +void hw_atl_rpfl3l4_ipv4_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 ipv4_dest); + +/* set ipv4 filter source address */ +void hw_atl_rpfl3l4_ipv4_src_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 ipv4_src); + +/* set command for filter l3-l4 */ +void hw_atl_rpfl3l4_cmd_set(struct aq_hw_s *aq_hw, u8 location, u32 cmd); + +/* set ipv6 filter source address */ +void hw_atl_rpfl3l4_ipv6_src_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 *ipv6_src); + +/* set ipv6 filter destination address */ +void hw_atl_rpfl3l4_ipv6_dest_addr_set(struct aq_hw_s *aq_hw, u8 location, + u32 *ipv6_dest); + #endif /* HW_ATL_LLH_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h index a715fa317b1c..8470d92db812 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h @@ -1092,24 +1092,43 @@ /* Default value of bitfield vl_id{F}[B:0] */ #define HW_ATL_RPF_VL_ID_F_DEFAULT 0x0 -/* RX et_en{F} Bitfield Definitions - * Preprocessor definitions for the bitfield "et_en{F}". +/* RX vl_rxq_en{F} Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_rxq{F}". * Parameter: filter {F} | stride size 0x4 | range [0, 15] - * PORT="pif_rpf_et_en_i[0]" - */ - -/* Register address for bitfield et_en{F} */ -#define HW_ATL_RPF_ET_EN_F_ADR(filter) (0x00005300 + (filter) * 0x4) -/* Bitmask for bitfield et_en{F} */ -#define HW_ATL_RPF_ET_EN_F_MSK 0x80000000 -/* Inverted bitmask for bitfield et_en{F} */ -#define HW_ATL_RPF_ET_EN_F_MSKN 0x7FFFFFFF -/* Lower bit position of bitfield et_en{F} */ -#define HW_ATL_RPF_ET_EN_F_SHIFT 31 -/* Width of bitfield et_en{F} */ -#define HW_ATL_RPF_ET_EN_F_WIDTH 1 -/* Default value of bitfield et_en{F} */ -#define HW_ATL_RPF_ET_EN_F_DEFAULT 0x0 + * PORT="pif_rpf_vl_rxq_en_i" + */ + +/* Register address for bitfield vl_rxq_en{F} */ +#define HW_ATL_RPF_VL_RXQ_EN_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_rxq_en{F} */ +#define HW_ATL_RPF_VL_RXQ_EN_F_MSK 0x10000000 +/* Inverted bitmask for bitfield vl_rxq_en{F}[ */ +#define HW_ATL_RPF_VL_RXQ_EN_F_MSKN 0xEFFFFFFF +/* Lower bit position of bitfield vl_rxq_en{F} */ +#define HW_ATL_RPF_VL_RXQ_EN_F_SHIFT 28 +/* Width of bitfield vl_rxq_en{F} */ +#define HW_ATL_RPF_VL_RXQ_EN_F_WIDTH 1 +/* Default value of bitfield vl_rxq_en{F} */ +#define HW_ATL_RPF_VL_RXQ_EN_F_DEFAULT 0x0 + +/* RX vl_rxq{F}[4:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "vl_rxq{F}[4:0]". 
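Putting the two new accessors together, a usage sketch (filter index and queue number are arbitrary here; hw_atl_b0_hw_vlan_set() above does the same per slot):

    /* route traffic matching VLAN filter 2 to RX queue 5 */
    hw_atl_rpf_vlan_rxq_flr_set(aq_hw, 5U, 2U);
    hw_atl_rpf_vlan_rxq_en_flr_set(aq_hw, 1U, 2U);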
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15] + * PORT="pif_rpf_vl_rxq0_i[4:0]" + */ + +/* Register address for bitfield vl_rxq{F}[4:0] */ +#define HW_ATL_RPF_VL_RXQ_F_ADR(filter) (0x00005290 + (filter) * 0x4) +/* Bitmask for bitfield vl_rxq{F}[4:0] */ +#define HW_ATL_RPF_VL_RXQ_F_MSK 0x01F00000 +/* Inverted bitmask for bitfield vl_rxq{F}[4:0] */ +#define HW_ATL_RPF_VL_RXQ_F_MSKN 0xFE0FFFFF +/* Lower bit position of bitfield vl_rxq{F}[4:0] */ +#define HW_ATL_RPF_VL_RXQ_F_SHIFT 20 +/* Width of bitfield vl_rxq{F}[4:0] */ +#define HW_ATL_RPF_VL_RXQ_F_WIDTH 5 +/* Default value of bitfield vl_rxq{F}[4:0] */ +#define HW_ATL_RPF_VL_RXQ_F_DEFAULT 0x0 /* rx et_en{f} bitfield definitions * preprocessor definitions for the bitfield "et_en{f}". @@ -1263,6 +1282,44 @@ /* default value of bitfield et_val{f}[f:0] */ #define HW_ATL_RPF_ET_VALF_DEFAULT 0x0 +/* RX l4_sp{D}[F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_sp{D}[F:0]". + * Parameter: srcport {D} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_sp0_i[15:0]" + */ + +/* Register address for bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_ADR(srcport) (0x00005400u + (srcport) * 0x4) +/* Bitmask for bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_MSK 0x0000FFFFu +/* Inverted bitmask for bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_MSKN 0xFFFF0000u +/* Lower bit position of bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_SHIFT 0 +/* Width of bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_WIDTH 16 +/* Default value of bitfield l4_sp{D}[F:0] */ +#define HW_ATL_RPF_L4_SPD_DEFAULT 0x0 + +/* RX l4_dp{D}[F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l4_dp{D}[F:0]". + * Parameter: destport {D} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l4_dp0_i[15:0]" + */ + +/* Register address for bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_ADR(destport) (0x00005420u + (destport) * 0x4) +/* Bitmask for bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_MSK 0x0000FFFFu +/* Inverted bitmask for bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_MSKN 0xFFFF0000u +/* Lower bit position of bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_SHIFT 0 +/* Width of bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_WIDTH 16 +/* Default value of bitfield l4_dp{D}[F:0] */ +#define HW_ATL_RPF_L4_DPD_DEFAULT 0x0 + /* rx ipv4_chk_en bitfield definitions * preprocessor definitions for the bitfield "ipv4_chk_en". * port="pif_rpo_ipv4_chk_en_i" @@ -2418,4 +2475,48 @@ /* default value of bitfield uP Force Interrupt */ #define HW_ATL_MCP_UP_FORCE_INTERRUPT_DEFAULT 0x0 +#define HW_ATL_RX_CTRL_ADDR_BEGIN_FL3L4 0x00005380 +#define HW_ATL_RX_SRCA_ADDR_BEGIN_FL3L4 0x000053B0 +#define HW_ATL_RX_DESTA_ADDR_BEGIN_FL3L4 0x000053D0 + +#define HW_ATL_RPF_L3_REG_CTRL_ADR(location) (0x00005380 + (location) * 0x4) + +/* RX rpf_l3_sa{D}[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_sa{D}[1F:0]".
+ * Parameter: location {D} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_sa0_i[31:0]" + */ + +/* Register address for bitfield pif_rpf_l3_sa0_i[31:0] */ +#define HW_ATL_RPF_L3_SRCA_ADR(location) (0x000053B0 + (location) * 0x4) +/* Bitmask for bitfield l3_sa0[1F:0] */ +#define HW_ATL_RPF_L3_SRCA_MSK 0xFFFFFFFFu +/* Inverted bitmask for bitfield l3_sa0[1F:0] */ +#define HW_ATL_RPF_L3_SRCA_MSKN 0xFFFFFFFFu +/* Lower bit position of bitfield l3_sa0[1F:0] */ +#define HW_ATL_RPF_L3_SRCA_SHIFT 0 +/* Width of bitfield l3_sa0[1F:0] */ +#define HW_ATL_RPF_L3_SRCA_WIDTH 32 +/* Default value of bitfield l3_sa0[1F:0] */ +#define HW_ATL_RPF_L3_SRCA_DEFAULT 0x0 + +/* RX rpf_l3_da{D}[1F:0] Bitfield Definitions + * Preprocessor definitions for the bitfield "l3_da{D}[1F:0]". + * Parameter: location {D} | stride size 0x4 | range [0, 7] + * PORT="pif_rpf_l3_da0_i[31:0]" + */ + + /* Register address for bitfield pif_rpf_l3_da0_i[31:0] */ +#define HW_ATL_RPF_L3_DSTA_ADR(location) (0x000053B0 + (location) * 0x4) +/* Bitmask for bitfield l3_da0[1F:0] */ +#define HW_ATL_RPF_L3_DSTA_MSK 0xFFFFFFFFu +/* Inverted bitmask for bitfield l3_da0[1F:0] */ +#define HW_ATL_RPF_L3_DSTA_MSKN 0xFFFFFFFFu +/* Lower bit position of bitfield l3_da0[1F:0] */ +#define HW_ATL_RPF_L3_DSTA_SHIFT 0 +/* Width of bitfield l3_da0[1F:0] */ +#define HW_ATL_RPF_L3_DSTA_WIDTH 32 +/* Default value of bitfield l3_da0[1F:0] */ +#define HW_ATL_RPF_L3_DSTA_DEFAULT 0x0 + #endif /* HW_ATL_LLH_INTERNAL_H */ diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c index 7def1cb8ab9d..9b74a3197d7f 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c @@ -263,6 +263,8 @@ int hw_atl_utils_soft_reset(struct aq_hw_s *self) AQ_HW_WAIT_FOR((aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR) & HW_ATL_MPI_STATE_MSK) == MPI_DEINIT, 10, 1000U); + if (err) + return err; } if (self->rbl_enabled) @@ -454,8 +456,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, (fw.val = aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR), fw.tid), 1000U, 100U); - if (err < 0) - goto err_exit; if (fw.len == 0xFFFFU) { err = hw_atl_utils_fw_rpc_call(self, sw.len); @@ -463,8 +463,6 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self, goto err_exit; } } while (sw.tid != fw.tid || 0xFFFFU == fw.len); - if (err < 0) - goto err_exit; if (rpc) { if (fw.len) { diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h index 3613fca64b58..48278e333462 100644 --- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h +++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h @@ -240,6 +240,64 @@ struct __packed offload_info { u8 buf[0]; }; +enum hw_atl_rx_action_with_traffic { + HW_ATL_RX_DISCARD, + HW_ATL_RX_HOST, +}; + +struct aq_rx_filter_vlan { + u8 enable; + u8 location; + u16 vlan_id; + u8 queue; +}; + +struct aq_rx_filter_l2 { + s8 queue; + u8 location; + u8 user_priority_en; + u8 user_priority; + u16 ethertype; +}; + +struct aq_rx_filter_l3l4 { + u32 cmd; + u8 location; + u32 ip_dst[4]; + u32 ip_src[4]; + u16 p_dst; + u16 p_src; + u8 is_ipv6; +}; + +enum hw_atl_rx_protocol_value_l3l4 { + HW_ATL_RX_TCP, + HW_ATL_RX_UDP, + HW_ATL_RX_SCTP, + HW_ATL_RX_ICMP +}; + +enum hw_atl_rx_ctrl_registers_l3l4 { + HW_ATL_RX_ENABLE_MNGMNT_QUEUE_L3L4 = BIT(22), + HW_ATL_RX_ENABLE_QUEUE_L3L4 = BIT(23), + HW_ATL_RX_ENABLE_ARP_FLTR_L3 = BIT(24), + 
HW_ATL_RX_ENABLE_CMP_PROT_L4 = BIT(25), + HW_ATL_RX_ENABLE_CMP_DEST_PORT_L4 = BIT(26), + HW_ATL_RX_ENABLE_CMP_SRC_PORT_L4 = BIT(27), + HW_ATL_RX_ENABLE_CMP_DEST_ADDR_L3 = BIT(28), + HW_ATL_RX_ENABLE_CMP_SRC_ADDR_L3 = BIT(29), + HW_ATL_RX_ENABLE_L3_IPV6 = BIT(30), + HW_ATL_RX_ENABLE_FLTR_L3L4 = BIT(31) +}; + +#define HW_ATL_RX_QUEUE_FL3L4_SHIFT 8U +#define HW_ATL_RX_ACTION_FL3F4_SHIFT 16U + +#define HW_ATL_RX_CNT_REG_ADDR_IPV6 4U + +#define HW_ATL_GET_REG_LOCATION_FL3L4(location) \ + ((location) - AQ_RX_FIRST_LOC_FL3L4) + #define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U #define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U #define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c index bd277b0dc615..4406325fdd9f 100644 --- a/drivers/net/ethernet/arc/emac_main.c +++ b/drivers/net/ethernet/arc/emac_main.c @@ -432,7 +432,8 @@ static int arc_emac_open(struct net_device *ndev) phy_dev->autoneg = AUTONEG_ENABLE; phy_dev->speed = 0; phy_dev->duplex = 0; - phy_dev->advertising &= phy_dev->supported; + linkmode_and(phy_dev->advertising, phy_dev->advertising, + phy_dev->supported); priv->last_rx_bd = 0; diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c index e445ab724827..f44808959ff3 100644 --- a/drivers/net/ethernet/broadcom/b44.c +++ b/drivers/net/ethernet/broadcom/b44.c @@ -2248,6 +2248,7 @@ static void b44_adjust_link(struct net_device *dev) static int b44_register_phy_one(struct b44 *bp) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct mii_bus *mii_bus; struct ssb_device *sdev = bp->sdev; struct phy_device *phydev; @@ -2303,11 +2304,12 @@ static int b44_register_phy_one(struct b44 *bp) } /* mask with MAC supported features */ - phydev->supported &= (SUPPORTED_100baseT_Half | - SUPPORTED_100baseT_Full | - SUPPORTED_Autoneg | - SUPPORTED_MII); - phydev->advertising = phydev->supported; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); bp->old_link = 0; bp->phy_addr = phydev->mdio.addr; diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 0e2d99c737e3..4574275ef445 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1068,6 +1068,7 @@ static void mpd_enable_set(struct bcm_sysport_priv *priv, bool enable) static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) { + unsigned int index; u32 reg; /* Disable RXCHK, active filters and Broadcom tag matching */ @@ -1076,6 +1077,15 @@ static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv) RXCHK_BRCM_TAG_MATCH_SHIFT | RXCHK_EN | RXCHK_BRCM_TAG_EN); rxchk_writel(priv, reg, RXCHK_CONTROL); + /* Make sure we restore correct CID index in case HW lost + * its context during deep idle state + */ + for_each_set_bit(index, priv->filters, RXCHK_BRCM_TAG_MAX) { + rxchk_writel(priv, priv->filters_loc[index] << + RXCHK_BRCM_TAG_CID_SHIFT, RXCHK_BRCM_TAG(index)); + rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); + } + /* Clear the MagicPacket detection logic */ mpd_enable_set(priv, false); @@ -2189,6 +2199,7 @@ static int bcm_sysport_rule_set(struct bcm_sysport_priv *priv, rxchk_writel(priv, reg, 
RXCHK_BRCM_TAG(index)); rxchk_writel(priv, 0xff00ffff, RXCHK_BRCM_TAG_MASK(index)); + priv->filters_loc[index] = nfc->fs.location; set_bit(index, priv->filters); return 0; @@ -2208,6 +2219,7 @@ static int bcm_sysport_rule_del(struct bcm_sysport_priv *priv, * be taken care of during suspend time by bcm_sysport_suspend_to_wol */ clear_bit(index, priv->filters); + priv->filters_loc[index] = 0; return 0; } @@ -2312,7 +2324,7 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, struct bcm_sysport_priv *priv; struct net_device *slave_dev; unsigned int num_tx_queues; - unsigned int q, start, port; + unsigned int q, qp, port; struct net_device *dev; priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); @@ -2351,20 +2363,61 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, priv->per_port_num_tx_queues = num_tx_queues; - start = find_first_zero_bit(&priv->queue_bitmap, dev->num_tx_queues); - for (q = 0; q < num_tx_queues; q++) { - ring = &priv->tx_rings[q + start]; + for (q = 0, qp = 0; q < dev->num_tx_queues && qp < num_tx_queues; + q++) { + ring = &priv->tx_rings[q]; + + if (ring->inspect) + continue; /* Just remember the mapping actual programming done * during bcm_sysport_init_tx_ring */ - ring->switch_queue = q; + ring->switch_queue = qp; ring->switch_port = port; ring->inspect = true; priv->ring_map[q + port * num_tx_queues] = ring; + qp++; + } + + return 0; +} + +static int bcm_sysport_unmap_queues(struct notifier_block *nb, + struct dsa_notifier_register_info *info) +{ + struct bcm_sysport_tx_ring *ring; + struct bcm_sysport_priv *priv; + struct net_device *slave_dev; + unsigned int num_tx_queues; + struct net_device *dev; + unsigned int q, port; + + priv = container_of(nb, struct bcm_sysport_priv, dsa_notifier); + if (priv->netdev != info->master) + return 0; + + dev = info->master; + + if (dev->netdev_ops != &bcm_sysport_netdev_ops) + return 0; + + port = info->port_number; + slave_dev = info->info.dev; + + num_tx_queues = slave_dev->real_num_tx_queues; + + for (q = 0; q < dev->num_tx_queues; q++) { + ring = &priv->tx_rings[q]; - /* Set all queues as being used now */ - set_bit(q + start, &priv->queue_bitmap); + if (ring->switch_port != port) + continue; + + if (!ring->inspect) + continue; + + ring->inspect = false; + priv->ring_map[q + port * num_tx_queues] = NULL; } return 0; @@ -2373,14 +2426,18 @@ static int bcm_sysport_map_queues(struct notifier_block *nb, static int bcm_sysport_dsa_notifier(struct notifier_block *nb, unsigned long event, void *ptr) { - struct dsa_notifier_register_info *info; - - if (event != DSA_PORT_REGISTER) - return NOTIFY_DONE; + int ret = NOTIFY_DONE; - info = ptr; + switch (event) { + case DSA_PORT_REGISTER: + ret = bcm_sysport_map_queues(nb, ptr); + break; + case DSA_PORT_UNREGISTER: + ret = bcm_sysport_unmap_queues(nb, ptr); + break; + } - return notifier_from_errno(bcm_sysport_map_queues(nb, info)); + return notifier_from_errno(ret); } #define REV_FMT "v%2x.%02x" diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h index a7a230884a87..0887e6356649 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.h +++ b/drivers/net/ethernet/broadcom/bcmsysport.h @@ -786,6 +786,7 @@ struct bcm_sysport_priv { /* Ethtool */ u32 msg_enable; DECLARE_BITMAP(filters, RXCHK_BRCM_TAG_MAX); + u32 filters_loc[RXCHK_BRCM_TAG_MAX]; struct bcm_sysport_stats64 stats64; @@ -795,7 +796,6 @@ struct bcm_sysport_priv { /* map information between switch port queues and local queues */ struct notifier_block 
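The bcm_sysport_map_queues()/bcm_sysport_unmap_queues() pair above replaces the old queue_bitmap with a single invariant: a TX ring belongs to at most one (switch port, switch queue) pair while ring->inspect is set, and unmap reverses exactly the three assignments map made. A reduced stand-alone model of that bookkeeping, with the driver structures trimmed to the fields involved (all names here are illustrative):

#include <stdbool.h>

#define NUM_RINGS	16
#define MAX_PORTS	4

struct demo_ring {
	unsigned int switch_queue;
	unsigned int switch_port;
	bool inspect;
};

static struct demo_ring rings[NUM_RINGS];
static struct demo_ring *ring_map[NUM_RINGS * MAX_PORTS];

/* Claim the first free rings for a port's queues (models map_queues). */
static void demo_map(unsigned int port, unsigned int num_queues)
{
	unsigned int q, qp;

	for (q = 0, qp = 0; q < NUM_RINGS && qp < num_queues; q++) {
		if (rings[q].inspect)
			continue;	/* already owned by another port */
		rings[q].switch_queue = qp;
		rings[q].switch_port = port;
		rings[q].inspect = true;
		ring_map[q + port * num_queues] = &rings[q];
		qp++;
	}
}

/* Release every ring the port owns (models unmap_queues). */
static void demo_unmap(unsigned int port, unsigned int num_queues)
{
	unsigned int q;

	for (q = 0; q < NUM_RINGS; q++) {
		if (!rings[q].inspect || rings[q].switch_port != port)
			continue;
		rings[q].inspect = false;
		ring_map[q + port * num_queues] = NULL;
	}
}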
dsa_notifier; unsigned int per_port_num_tx_queues; - unsigned long queue_bitmap; struct bcm_sysport_tx_ring *ring_map[DSA_MAX_PORTS * 8]; }; diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c index a4a90b6cdb46..749d0ef44371 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c @@ -1105,11 +1105,39 @@ static void bnx2x_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bnx2x *bp = netdev_priv(dev); + char version[ETHTOOL_FWVERS_LEN]; + int ext_dev_info_offset; + u32 mbi; strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); - bnx2x_fill_fw_str(bp, info->fw_version, sizeof(info->fw_version)); + memset(version, 0, sizeof(version)); + snprintf(version, ETHTOOL_FWVERS_LEN, " storm %d.%d.%d.%d", + BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION, + BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_ENGINEERING_VERSION); + strlcat(info->version, version, sizeof(info->version)); + + if (SHMEM2_HAS(bp, extended_dev_info_shared_addr)) { + ext_dev_info_offset = SHMEM2_RD(bp, + extended_dev_info_shared_addr); + mbi = REG_RD(bp, ext_dev_info_offset + + offsetof(struct extended_dev_info_shared_cfg, + mbi_version)); + if (mbi) { + memset(version, 0, sizeof(version)); + snprintf(version, ETHTOOL_FWVERS_LEN, "mbi %d.%d.%d ", + (mbi & 0xff000000) >> 24, + (mbi & 0x00ff0000) >> 16, + (mbi & 0x0000ff00) >> 8); + strlcpy(info->fw_version, version, + sizeof(info->fw_version)); + } + } + + memset(version, 0, sizeof(version)); + bnx2x_fill_fw_str(bp, version, ETHTOOL_FWVERS_LEN); + strlcat(info->fw_version, version, sizeof(info->fw_version)); strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info)); } diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h index f8b810313094..d9057c8bbeef 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_hsi.h @@ -1140,6 +1140,11 @@ struct shm_dev_info { /* size */ }; +struct extended_dev_info_shared_cfg { + u32 reserved[18]; + u32 mbi_version; + u32 mbi_date; +}; #if !defined(__LITTLE_ENDIAN) && !defined(__BIG_ENDIAN) #error "Missing either LITTLE_ENDIAN or BIG_ENDIAN definition." diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index b164f705709d..3b5b47e98c73 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -9360,10 +9360,16 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link) BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n", rc); - /* Remove all currently configured VLANs */ - rc = bnx2x_del_all_vlans(bp); - if (rc < 0) - BNX2X_ERR("Failed to delete all VLANs\n"); + /* The whole *vlan_obj structure may be not initialized if VLAN + * filtering offload is not supported by hardware. Currently this is + * true for all hardware covered by CHIP_IS_E1x(). 
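In the bnx2x_get_drvinfo() hunk above, the mbi word packs the management-firmware version one byte per field, consumed from the high byte down. A worked stand-alone example with a hypothetical register value:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t mbi = 0x010a0500;	/* hypothetical readback */

	/* Same shifts as the ethtool hunk: bytes 3, 2 and 1. */
	printf("mbi %u.%u.%u\n",
	       (unsigned int)((mbi & 0xff000000) >> 24),	/* 1  */
	       (unsigned int)((mbi & 0x00ff0000) >> 16),	/* 10 */
	       (unsigned int)((mbi & 0x0000ff00) >> 8));	/* 5  */
	return 0;	/* prints "mbi 1.10.5" */
}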
+ */ + if (!CHIP_IS_E1x(bp)) { + /* Remove all currently configured VLANs */ + rc = bnx2x_del_all_vlans(bp); + if (rc < 0) + BNX2X_ERR("Failed to delete all VLANs\n"); + } /* Disable LLH */ if (!CHIP_IS_E1(bp)) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 5d21c14853ac..3aa80da973d7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -118,6 +118,7 @@ enum board_idx { NETXTREME_E_VF, NETXTREME_C_VF, NETXTREME_S_VF, + NETXTREME_E_P5_VF, }; /* indexed by enum above */ @@ -160,6 +161,7 @@ static const struct { [NETXTREME_E_VF] = { "Broadcom NetXtreme-E Ethernet Virtual Function" }, [NETXTREME_C_VF] = { "Broadcom NetXtreme-C Ethernet Virtual Function" }, [NETXTREME_S_VF] = { "Broadcom NetXtreme-S Ethernet Virtual Function" }, + [NETXTREME_E_P5_VF] = { "Broadcom BCM5750X NetXtreme-E Ethernet Virtual Function" }, }; static const struct pci_device_id bnxt_pci_tbl[] = { @@ -210,6 +212,7 @@ static const struct pci_device_id bnxt_pci_tbl[] = { { PCI_VDEVICE(BROADCOM, 0x16dc), .driver_data = NETXTREME_E_VF }, { PCI_VDEVICE(BROADCOM, 0x16e1), .driver_data = NETXTREME_C_VF }, { PCI_VDEVICE(BROADCOM, 0x16e5), .driver_data = NETXTREME_C_VF }, + { PCI_VDEVICE(BROADCOM, 0x1807), .driver_data = NETXTREME_E_P5_VF }, { PCI_VDEVICE(BROADCOM, 0xd800), .driver_data = NETXTREME_S_VF }, #endif { 0 } @@ -237,7 +240,7 @@ static struct workqueue_struct *bnxt_pf_wq; static bool bnxt_vf_pciid(enum board_idx idx) { return (idx == NETXTREME_C_VF || idx == NETXTREME_E_VF || - idx == NETXTREME_S_VF); + idx == NETXTREME_S_VF || idx == NETXTREME_E_P5_VF); } #define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) @@ -1809,7 +1812,7 @@ static int bnxt_hwrm_handler(struct bnxt *bp, struct tx_cmp *txcmp) case CMPL_BASE_TYPE_HWRM_DONE: seq_id = le16_to_cpu(h_cmpl->sequence_id); if (seq_id == bp->hwrm_intr_seq_id) - bp->hwrm_intr_seq_id = HWRM_SEQ_ID_INVALID; + bp->hwrm_intr_seq_id = (u16)~bp->hwrm_intr_seq_id; else netdev_err(bp->dev, "Invalid hwrm seq id %d\n", seq_id); break; @@ -2372,7 +2375,11 @@ static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) rmem->pg_arr[i] = NULL; } if (rmem->pg_tbl) { - dma_free_coherent(&pdev->dev, rmem->nr_pages * 8, + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + dma_free_coherent(&pdev->dev, pg_tbl_size, rmem->pg_tbl, rmem->pg_tbl_map); rmem->pg_tbl = NULL; } @@ -2390,9 +2397,12 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) if (rmem->flags & (BNXT_RMEM_VALID_PTE_FLAG | BNXT_RMEM_RING_PTE_FLAG)) valid_bit = PTU_PTE_VALID; - if (rmem->nr_pages > 1) { - rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, - rmem->nr_pages * 8, + if ((rmem->nr_pages > 1 || rmem->depth > 0) && !rmem->pg_tbl) { + size_t pg_tbl_size = rmem->nr_pages * 8; + + if (rmem->flags & BNXT_RMEM_USE_FULL_PAGE_FLAG) + pg_tbl_size = rmem->page_size; + rmem->pg_tbl = dma_alloc_coherent(&pdev->dev, pg_tbl_size, &rmem->pg_tbl_map, GFP_KERNEL); if (!rmem->pg_tbl) @@ -2409,7 +2419,7 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem) if (!rmem->pg_arr[i]) return -ENOMEM; - if (rmem->nr_pages > 1) { + if (rmem->nr_pages > 1 || rmem->depth > 0) { if (i == rmem->nr_pages - 2 && (rmem->flags & BNXT_RMEM_RING_PTE_FLAG)) extra_bits |= PTU_PTE_NEXT_TO_LAST; @@ -3276,6 +3286,27 @@ static void bnxt_free_hwrm_resources(struct bnxt *bp) bp->hwrm_cmd_resp_dma_addr); 
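Two hunks above retire HWRM_SEQ_ID_INVALID: the completion handler now acknowledges a response by storing the bitwise complement of the expected sequence id, and the polling loop further down in bnxt_hwrm_do_send_msg() waits until it observes that complement. The scheme works because a 16-bit value can never equal its own complement, so no id has to be reserved as a sentinel; a tiny stand-alone check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t id;

	/* x == ~x would require every bit to equal its own inverse. */
	for (id = 0; id <= 0xffff; id++)
		assert((uint16_t)~id != (uint16_t)id);
	return 0;
}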
bp->hwrm_cmd_resp_addr = NULL; } + + if (bp->hwrm_cmd_kong_resp_addr) { + dma_free_coherent(&pdev->dev, PAGE_SIZE, + bp->hwrm_cmd_kong_resp_addr, + bp->hwrm_cmd_kong_resp_dma_addr); + bp->hwrm_cmd_kong_resp_addr = NULL; + } +} + +static int bnxt_alloc_kong_hwrm_resources(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + + bp->hwrm_cmd_kong_resp_addr = + dma_alloc_coherent(&pdev->dev, PAGE_SIZE, + &bp->hwrm_cmd_kong_resp_dma_addr, + GFP_KERNEL); + if (!bp->hwrm_cmd_kong_resp_addr) + return -ENOMEM; + + return 0; } static int bnxt_alloc_hwrm_resources(struct bnxt *bp) @@ -3317,9 +3348,8 @@ static int bnxt_alloc_hwrm_short_cmd_req(struct bnxt *bp) return 0; } -static void bnxt_free_stats(struct bnxt *bp) +static void bnxt_free_port_stats(struct bnxt *bp) { - u32 size, i; struct pci_dev *pdev = bp->pdev; bp->flags &= ~BNXT_FLAG_PORT_STATS; @@ -3345,6 +3375,12 @@ static void bnxt_free_stats(struct bnxt *bp) bp->hw_rx_port_stats_ext_map); bp->hw_rx_port_stats_ext = NULL; } +} + +static void bnxt_free_ring_stats(struct bnxt *bp) +{ + struct pci_dev *pdev = bp->pdev; + int size, i; if (!bp->bnapi) return; @@ -3384,6 +3420,9 @@ static int bnxt_alloc_stats(struct bnxt *bp) } if (BNXT_PF(bp) && bp->chip_num != CHIP_NUM_58700) { + if (bp->hw_rx_port_stats) + goto alloc_ext_stats; + bp->hw_port_stats_size = sizeof(struct rx_port_stats) + sizeof(struct tx_port_stats) + 1024; @@ -3400,11 +3439,15 @@ static int bnxt_alloc_stats(struct bnxt *bp) sizeof(struct rx_port_stats) + 512; bp->flags |= BNXT_FLAG_PORT_STATS; +alloc_ext_stats: /* Display extended statistics only if FW supports it */ if (bp->hwrm_spec_code < 0x10804 || bp->hwrm_spec_code == 0x10900) return 0; + if (bp->hw_rx_port_stats_ext) + goto alloc_tx_ext_stats; + bp->hw_rx_port_stats_ext = dma_zalloc_coherent(&pdev->dev, sizeof(struct rx_port_stats_ext), @@ -3413,6 +3456,10 @@ static int bnxt_alloc_stats(struct bnxt *bp) if (!bp->hw_rx_port_stats_ext) return 0; +alloc_tx_ext_stats: + if (bp->hw_tx_port_stats_ext) + return 0; + if (bp->hwrm_spec_code >= 0x10902) { bp->hw_tx_port_stats_ext = dma_zalloc_coherent(&pdev->dev, @@ -3520,7 +3567,7 @@ static void bnxt_free_mem(struct bnxt *bp, bool irq_re_init) bnxt_free_cp_rings(bp); bnxt_free_ntp_fltrs(bp, irq_re_init); if (irq_re_init) { - bnxt_free_stats(bp); + bnxt_free_ring_stats(bp); bnxt_free_ring_grps(bp); bnxt_free_vnics(bp); kfree(bp->tx_ring_map); @@ -3721,7 +3768,10 @@ void bnxt_hwrm_cmd_hdr_init(struct bnxt *bp, void *request, u16 req_type, req->req_type = cpu_to_le16(req_type); req->cmpl_ring = cpu_to_le16(cmpl_ring); req->target_id = cpu_to_le16(target_id); - req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); + if (bnxt_kong_hwrm_message(bp, req)) + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr); + else + req->resp_addr = cpu_to_le64(bp->hwrm_cmd_resp_dma_addr); } static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, @@ -3736,11 +3786,10 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, struct hwrm_err_output *resp = bp->hwrm_cmd_resp_addr; u16 max_req_len = BNXT_HWRM_MAX_REQ_LEN; struct hwrm_short_input short_input = {0}; - - req->seq_id = cpu_to_le16(bp->hwrm_cmd_seq++); - memset(resp, 0, PAGE_SIZE); - cp_ring_id = le16_to_cpu(req->cmpl_ring); - intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 
0 : 1; + u32 doorbell_offset = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER; + u8 *resp_addr = (u8 *)bp->hwrm_cmd_resp_addr; + u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; + u16 dst = BNXT_HWRM_CHNL_CHIMP; if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { if (msg_len > bp->hwrm_max_ext_req_len || @@ -3748,6 +3797,23 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, return -EINVAL; } + if (bnxt_hwrm_kong_chnl(bp, req)) { + dst = BNXT_HWRM_CHNL_KONG; + bar_offset = BNXT_GRCPF_REG_KONG_COMM; + doorbell_offset = BNXT_GRCPF_REG_KONG_COMM_TRIGGER; + resp = bp->hwrm_cmd_kong_resp_addr; + resp_addr = (u8 *)bp->hwrm_cmd_kong_resp_addr; + } + + memset(resp, 0, PAGE_SIZE); + cp_ring_id = le16_to_cpu(req->cmpl_ring); + intr_process = (cp_ring_id == INVALID_HW_RING_ID) ? 0 : 1; + + req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst)); + /* currently supports only one outstanding message */ + if (intr_process) + bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || msg_len > BNXT_HWRM_MAX_REQ_LEN) { void *short_cmd_req = bp->hwrm_short_cmd_req_addr; @@ -3781,17 +3847,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } /* Write request msg to hwrm channel */ - __iowrite32_copy(bp->bar0, data, msg_len / 4); + __iowrite32_copy(bp->bar0 + bar_offset, data, msg_len / 4); for (i = msg_len; i < max_req_len; i += 4) - writel(0, bp->bar0 + i); - - /* currently supports only one outstanding message */ - if (intr_process) - bp->hwrm_intr_seq_id = le16_to_cpu(req->seq_id); + writel(0, bp->bar0 + bar_offset + i); /* Ring channel doorbell */ - writel(1, bp->bar0 + 0x100); + writel(1, bp->bar0 + doorbell_offset); if (!timeout) timeout = DFLT_HWRM_CMD_TIMEOUT; @@ -3806,10 +3868,13 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, tmo_count = HWRM_SHORT_TIMEOUT_COUNTER; timeout = timeout - HWRM_SHORT_MIN_TIMEOUT * HWRM_SHORT_TIMEOUT_COUNTER; tmo_count += DIV_ROUND_UP(timeout, HWRM_MIN_TIMEOUT); - resp_len = bp->hwrm_cmd_resp_addr + HWRM_RESP_LEN_OFFSET; + resp_len = (__le32 *)(resp_addr + HWRM_RESP_LEN_OFFSET); + if (intr_process) { + u16 seq_id = bp->hwrm_intr_seq_id; + /* Wait until hwrm response cmpl interrupt is processed */ - while (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID && + while (bp->hwrm_intr_seq_id != (u16)~seq_id && i++ < tmo_count) { /* on first few passes, just barely sleep */ if (i < HWRM_SHORT_TIMEOUT_COUNTER) @@ -3820,14 +3885,14 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, HWRM_MAX_TIMEOUT); } - if (bp->hwrm_intr_seq_id != HWRM_SEQ_ID_INVALID) { + if (bp->hwrm_intr_seq_id != (u16)~seq_id) { netdev_err(bp->dev, "Resp cmpl intr err msg: 0x%x\n", le16_to_cpu(req->req_type)); return -1; } len = (le32_to_cpu(*resp_len) & HWRM_RESP_LEN_MASK) >> HWRM_RESP_LEN_SFT; - valid = bp->hwrm_cmd_resp_addr + len - 1; + valid = resp_addr + len - 1; } else { int j; @@ -3855,7 +3920,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, } /* Last byte of resp contains valid bit */ - valid = bp->hwrm_cmd_resp_addr + len - 1; + valid = resp_addr + len - 1; for (j = 0; j < HWRM_VALID_BIT_DELAY_USEC; j++) { /* make sure we read from updated DMA memory */ dma_rmb(); @@ -3990,6 +4055,10 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp) cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_VF_REQ_FWD); } + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req.flags |= cpu_to_le32( + FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE); + mutex_lock(&bp->hwrm_cmd_lock); rc = 
_hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) @@ -4118,12 +4187,11 @@ static int bnxt_hwrm_cfa_ntuple_filter_free(struct bnxt *bp, static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, struct bnxt_ntuple_filter *fltr) { - int rc = 0; + struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; struct hwrm_cfa_ntuple_filter_alloc_input req = {0}; - struct hwrm_cfa_ntuple_filter_alloc_output *resp = - bp->hwrm_cmd_resp_addr; + struct hwrm_cfa_ntuple_filter_alloc_output *resp; struct flow_keys *keys = &fltr->fkeys; - struct bnxt_vnic_info *vnic = &bp->vnic_info[fltr->rxq + 1]; + int rc = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_NTUPLE_FILTER_ALLOC, -1, -1); req.l2_filter_id = bp->vnic_info[0].fw_l2_filter_id[fltr->l2_fltr_idx]; @@ -4169,8 +4237,10 @@ static int bnxt_hwrm_cfa_ntuple_filter_alloc(struct bnxt *bp, req.dst_id = cpu_to_le16(vnic->fw_vnic_id); mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { + resp = bnxt_get_hwrm_resp_addr(bp, &req); fltr->filter_id = resp->ntuple_filter_id; + } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -5161,7 +5231,6 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_vnics = le16_to_cpu(resp->alloc_vnics); cp = le16_to_cpu(resp->alloc_cmpl_rings); stats = le16_to_cpu(resp->alloc_stat_ctx); - cp = min_t(u16, cp, stats); hw_resc->resv_irqs = cp; if (bp->flags & BNXT_FLAG_CHIP_P5) { int rx = hw_resc->resv_rx_rings; @@ -5180,6 +5249,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp) hw_resc->resv_hw_ring_grps = rx; } hw_resc->resv_cp_rings = cp; + hw_resc->resv_stat_ctxs = stats; } mutex_unlock(&bp->hwrm_cmd_lock); return 0; @@ -5209,7 +5279,7 @@ static bool bnxt_rfs_supported(struct bnxt *bp); static void __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, - int cp_rings, int vnics) + int cp_rings, int stats, int vnics) { u32 enables = 0; @@ -5251,7 +5321,7 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, struct hwrm_func_cfg_input *req, req->num_rsscos_ctxs = cpu_to_le16(ring_grps + 1); } - req->num_stat_ctxs = req->num_cmpl_rings; + req->num_stat_ctxs = cpu_to_le16(stats); req->num_vnics = cpu_to_le16(vnics); } req->enables = cpu_to_le32(enables); @@ -5261,7 +5331,7 @@ static void __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, struct hwrm_func_vf_cfg_input *req, int tx_rings, int rx_rings, int ring_grps, int cp_rings, - int vnics) + int stats, int vnics) { u32 enables = 0; @@ -5294,7 +5364,7 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, req->num_hw_ring_grps = cpu_to_le16(ring_grps); req->num_rsscos_ctxs = cpu_to_le16(BNXT_VF_MAX_RSS_CTX); } - req->num_stat_ctxs = req->num_cmpl_rings; + req->num_stat_ctxs = cpu_to_le16(stats); req->num_vnics = cpu_to_le16(vnics); req->enables = cpu_to_le32(enables); @@ -5302,13 +5372,13 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, static int bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, int vnics) { struct hwrm_func_cfg_input req = {0}; int rc; __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); if (!req.enables) return 0; @@ -5325,7 +5395,7 @@ bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, static int bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int 
ring_grps, int cp_rings, int stats, int vnics) { struct hwrm_func_vf_cfg_input req = {0}; int rc; @@ -5336,7 +5406,7 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) return -ENOMEM; @@ -5346,15 +5416,17 @@ bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_reserve_rings(struct bnxt *bp, int tx, int rx, int grp, - int cp, int vnic) + int cp, int stat, int vnic) { if (BNXT_PF(bp)) - return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, vnic); + return bnxt_hwrm_reserve_pf_rings(bp, tx, rx, grp, cp, stat, + vnic); else - return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, vnic); + return bnxt_hwrm_reserve_vf_rings(bp, tx, rx, grp, cp, stat, + vnic); } -static int bnxt_nq_rings_in_use(struct bnxt *bp) +int bnxt_nq_rings_in_use(struct bnxt *bp) { int cp = bp->cp_nr_rings; int ulp_msix, ulp_base; @@ -5380,12 +5452,17 @@ static int bnxt_cp_rings_in_use(struct bnxt *bp) return cp; } +static int bnxt_get_func_stat_ctxs(struct bnxt *bp) +{ + return bp->cp_nr_rings + bnxt_get_ulp_stat_ctxs(bp); +} + static bool bnxt_need_reserve_rings(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; int cp = bnxt_cp_rings_in_use(bp); int nq = bnxt_nq_rings_in_use(bp); - int rx = bp->rx_nr_rings; + int rx = bp->rx_nr_rings, stat; int vnic = 1, grp = rx; if (bp->hwrm_spec_code < 0x10601) @@ -5398,9 +5475,11 @@ static bool bnxt_need_reserve_rings(struct bnxt *bp) vnic = rx + 1; if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; + stat = bnxt_get_func_stat_ctxs(bp); if (BNXT_NEW_RM(bp) && (hw_resc->resv_rx_rings != rx || hw_resc->resv_cp_rings != cp || hw_resc->resv_irqs < nq || hw_resc->resv_vnics != vnic || + hw_resc->resv_stat_ctxs != stat || (hw_resc->resv_hw_ring_grps != grp && !(bp->flags & BNXT_FLAG_CHIP_P5)))) return true; @@ -5414,8 +5493,8 @@ static int __bnxt_reserve_rings(struct bnxt *bp) int tx = bp->tx_nr_rings; int rx = bp->rx_nr_rings; int grp, rx_rings, rc; + int vnic = 1, stat; bool sh = false; - int vnic = 1; if (!bnxt_need_reserve_rings(bp)) return 0; @@ -5427,8 +5506,9 @@ static int __bnxt_reserve_rings(struct bnxt *bp) if (bp->flags & BNXT_FLAG_AGG_RINGS) rx <<= 1; grp = bp->rx_nr_rings; + stat = bnxt_get_func_stat_ctxs(bp); - rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, vnic); + rc = bnxt_hwrm_reserve_rings(bp, tx, rx, grp, cp, stat, vnic); if (rc) return rc; @@ -5438,6 +5518,7 @@ static int __bnxt_reserve_rings(struct bnxt *bp) cp = hw_resc->resv_irqs; grp = hw_resc->resv_hw_ring_grps; vnic = hw_resc->resv_vnics; + stat = hw_resc->resv_stat_ctxs; } rx_rings = rx; @@ -5456,6 +5537,10 @@ static int __bnxt_reserve_rings(struct bnxt *bp) } } rx_rings = min_t(int, rx_rings, grp); + cp = min_t(int, cp, bp->cp_nr_rings); + if (stat > bnxt_get_ulp_stat_ctxs(bp)) + stat -= bnxt_get_ulp_stat_ctxs(bp); + cp = min_t(int, cp, stat); rc = bnxt_trim_rings(bp, &rx_rings, &tx, cp, sh); if (bp->flags & BNXT_FLAG_AGG_RINGS) rx = rx_rings << 1; @@ -5464,14 +5549,15 @@ static int __bnxt_reserve_rings(struct bnxt *bp) bp->rx_nr_rings = rx_rings; bp->cp_nr_rings = cp; - if (!tx || !rx || !cp || !grp || !vnic) + if (!tx || !rx || !cp || !grp || !vnic || !stat) return -ENOMEM; return rc; } static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int 
stats, + int vnics) { struct hwrm_func_vf_cfg_input req = {0}; u32 flags; @@ -5481,7 +5567,7 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, return 0; __bnxt_hwrm_reserve_vf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); flags = FUNC_VF_CFG_REQ_FLAGS_TX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_RX_ASSETS_TEST | FUNC_VF_CFG_REQ_FLAGS_CMPL_ASSETS_TEST | @@ -5499,14 +5585,15 @@ static int bnxt_hwrm_check_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { struct hwrm_func_cfg_input req = {0}; u32 flags; int rc; __bnxt_hwrm_reserve_pf_rings(bp, &req, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); flags = FUNC_CFG_REQ_FLAGS_TX_ASSETS_TEST; if (BNXT_NEW_RM(bp)) { flags |= FUNC_CFG_REQ_FLAGS_RX_ASSETS_TEST | @@ -5527,17 +5614,19 @@ static int bnxt_hwrm_check_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings, } static int bnxt_hwrm_check_rings(struct bnxt *bp, int tx_rings, int rx_rings, - int ring_grps, int cp_rings, int vnics) + int ring_grps, int cp_rings, int stats, + int vnics) { if (bp->hwrm_spec_code < 0x10801) return 0; if (BNXT_PF(bp)) return bnxt_hwrm_check_pf_rings(bp, tx_rings, rx_rings, - ring_grps, cp_rings, vnics); + ring_grps, cp_rings, stats, + vnics); return bnxt_hwrm_check_vf_rings(bp, tx_rings, rx_rings, ring_grps, - cp_rings, vnics); + cp_rings, stats, vnics); } static void bnxt_hwrm_coal_params_qcaps(struct bnxt *bp) @@ -5962,8 +6051,11 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr, pg_size = 2 << 4; *pg_attr = pg_size; - if (rmem->nr_pages > 1) { - *pg_attr |= 1; + if (rmem->depth >= 1) { + if (rmem->depth == 2) + *pg_attr |= 2; + else + *pg_attr |= 1; *pg_dir = cpu_to_le64(rmem->pg_tbl_map); } else { *pg_dir = cpu_to_le64(rmem->dma_arr[0]); @@ -6040,6 +6132,22 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) &req.stat_pg_size_stat_lvl, &req.stat_page_dir); } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV) { + ctx_pg = &ctx->mrav_mem; + req.mrav_num_entries = cpu_to_le32(ctx_pg->entries); + req.mrav_entry_size = cpu_to_le16(ctx->mrav_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.mrav_pg_size_mrav_lvl, + &req.mrav_page_dir); + } + if (enables & FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM) { + ctx_pg = &ctx->tim_mem; + req.tim_num_entries = cpu_to_le32(ctx_pg->entries); + req.tim_entry_size = cpu_to_le16(ctx->tim_entry_size); + bnxt_hwrm_set_pg_attr(&ctx_pg->ring_mem, + &req.tim_pg_size_tim_lvl, + &req.tim_page_dir); + } for (i = 0, num_entries = &req.tqm_sp_num_entries, pg_attr = &req.tqm_sp_pg_size_tqm_sp_lvl, pg_dir = &req.tqm_sp_page_dir, @@ -6060,25 +6168,104 @@ static int bnxt_hwrm_func_backing_store_cfg(struct bnxt *bp, u32 enables) } static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp, - struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size) + struct bnxt_ctx_pg_info *ctx_pg) { struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; - if (!mem_size) - return 0; - - rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); - if (rmem->nr_pages > MAX_CTX_PAGES) { - rmem->nr_pages = 0; - return -EINVAL; - } rmem->page_size = BNXT_PAGE_SIZE; rmem->pg_arr = ctx_pg->ctx_pg_arr; rmem->dma_arr = ctx_pg->ctx_dma_arr; rmem->flags = BNXT_RMEM_VALID_PTE_FLAG; + if (rmem->depth >= 1) + rmem->flags |= 
BNXT_RMEM_USE_FULL_PAGE_FLAG; return bnxt_alloc_ring(bp, rmem); } +static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size, + u8 depth) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + int rc; + + if (!mem_size) + return 0; + + ctx_pg->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (ctx_pg->nr_pages > MAX_CTX_TOTAL_PAGES) { + ctx_pg->nr_pages = 0; + return -EINVAL; + } + if (ctx_pg->nr_pages > MAX_CTX_PAGES || depth > 1) { + int nr_tbls, i; + + rmem->depth = 2; + ctx_pg->ctx_pg_tbl = kcalloc(MAX_CTX_PAGES, sizeof(ctx_pg), + GFP_KERNEL); + if (!ctx_pg->ctx_pg_tbl) + return -ENOMEM; + nr_tbls = DIV_ROUND_UP(ctx_pg->nr_pages, MAX_CTX_PAGES); + rmem->nr_pages = nr_tbls; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + if (rc) + return rc; + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + + pg_tbl = kzalloc(sizeof(*pg_tbl), GFP_KERNEL); + if (!pg_tbl) + return -ENOMEM; + ctx_pg->ctx_pg_tbl[i] = pg_tbl; + rmem = &pg_tbl->ring_mem; + rmem->pg_tbl = ctx_pg->ctx_pg_arr[i]; + rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i]; + rmem->depth = 1; + rmem->nr_pages = MAX_CTX_PAGES; + if (i == (nr_tbls - 1)) + rmem->nr_pages = ctx_pg->nr_pages % + MAX_CTX_PAGES; + rc = bnxt_alloc_ctx_mem_blk(bp, pg_tbl); + if (rc) + break; + } + } else { + rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE); + if (rmem->nr_pages > 1 || depth) + rmem->depth = 1; + rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg); + } + return rc; +} + +static void bnxt_free_ctx_pg_tbls(struct bnxt *bp, + struct bnxt_ctx_pg_info *ctx_pg) +{ + struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem; + + if (rmem->depth > 1 || ctx_pg->nr_pages > MAX_CTX_PAGES || + ctx_pg->ctx_pg_tbl) { + int i, nr_tbls = rmem->nr_pages; + + for (i = 0; i < nr_tbls; i++) { + struct bnxt_ctx_pg_info *pg_tbl; + struct bnxt_ring_mem_info *rmem2; + + pg_tbl = ctx_pg->ctx_pg_tbl[i]; + if (!pg_tbl) + continue; + rmem2 = &pg_tbl->ring_mem; + bnxt_free_ring(bp, rmem2); + ctx_pg->ctx_pg_arr[i] = NULL; + kfree(pg_tbl); + ctx_pg->ctx_pg_tbl[i] = NULL; + } + kfree(ctx_pg->ctx_pg_tbl); + ctx_pg->ctx_pg_tbl = NULL; + } + bnxt_free_ring(bp, rmem); + ctx_pg->nr_pages = 0; +} + static void bnxt_free_ctx_mem(struct bnxt *bp) { struct bnxt_ctx_mem_info *ctx = bp->ctx; @@ -6089,16 +6276,18 @@ static void bnxt_free_ctx_mem(struct bnxt *bp) if (ctx->tqm_mem[0]) { for (i = 0; i < bp->max_q + 1; i++) - bnxt_free_ring(bp, &ctx->tqm_mem[i]->ring_mem); + bnxt_free_ctx_pg_tbls(bp, ctx->tqm_mem[i]); kfree(ctx->tqm_mem[0]); ctx->tqm_mem[0] = NULL; } - bnxt_free_ring(bp, &ctx->stat_mem.ring_mem); - bnxt_free_ring(bp, &ctx->vnic_mem.ring_mem); - bnxt_free_ring(bp, &ctx->cq_mem.ring_mem); - bnxt_free_ring(bp, &ctx->srq_mem.ring_mem); - bnxt_free_ring(bp, &ctx->qp_mem.ring_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->tim_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->mrav_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->stat_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->vnic_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->cq_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->srq_mem); + bnxt_free_ctx_pg_tbls(bp, &ctx->qp_mem); ctx->flags &= ~BNXT_CTX_FLAG_INITED; } @@ -6107,6 +6296,9 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) struct bnxt_ctx_pg_info *ctx_pg; struct bnxt_ctx_mem_info *ctx; u32 mem_size, ena, entries; + u32 extra_srqs = 0; + u32 extra_qps = 0; + u8 pg_lvl = 1; int i, rc; rc = bnxt_hwrm_func_backing_store_qcaps(bp); @@ -6119,24 +6311,31 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) if (!ctx || (ctx->flags & BNXT_CTX_FLAG_INITED)) return 0; + if 
(bp->flags & BNXT_FLAG_ROCE_CAP) { + pg_lvl = 2; + extra_qps = 65536; + extra_srqs = 8192; + } + ctx_pg = &ctx->qp_mem; - ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries; + ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries + + extra_qps; mem_size = ctx->qp_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); if (rc) return rc; ctx_pg = &ctx->srq_mem; - ctx_pg->entries = ctx->srq_max_l2_entries; + ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs; mem_size = ctx->srq_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); if (rc) return rc; ctx_pg = &ctx->cq_mem; - ctx_pg->entries = ctx->cq_max_l2_entries; + ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2; mem_size = ctx->cq_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl); if (rc) return rc; @@ -6144,26 +6343,47 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp) ctx_pg->entries = ctx->vnic_max_vnic_entries + ctx->vnic_max_ring_table_entries; mem_size = ctx->vnic_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); if (rc) return rc; ctx_pg = &ctx->stat_mem; ctx_pg->entries = ctx->stat_max_entries; mem_size = ctx->stat_entry_size * ctx_pg->entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + if (rc) + return rc; + + ena = 0; + if (!(bp->flags & BNXT_FLAG_ROCE_CAP)) + goto skip_rdma; + + ctx_pg = &ctx->mrav_mem; + ctx_pg->entries = extra_qps * 4; + mem_size = ctx->mrav_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2); if (rc) return rc; + ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV; - entries = ctx->qp_max_l2_entries; + ctx_pg = &ctx->tim_mem; + ctx_pg->entries = ctx->qp_mem.entries; + mem_size = ctx->tim_entry_size * ctx_pg->entries; + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); + if (rc) + return rc; + ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM; + +skip_rdma: + entries = ctx->qp_max_l2_entries + extra_qps; entries = roundup(entries, ctx->tqm_entries_multiple); entries = clamp_t(u32, entries, ctx->tqm_min_entries_per_ring, ctx->tqm_max_entries_per_ring); - for (i = 0, ena = 0; i < bp->max_q + 1; i++) { + for (i = 0; i < bp->max_q + 1; i++) { ctx_pg = ctx->tqm_mem[i]; ctx_pg->entries = entries; mem_size = ctx->tqm_entry_size * entries; - rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg, mem_size); + rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1); if (rc) return rc; ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i; @@ -6190,7 +6410,8 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) req.fid = cpu_to_le16(0xffff); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = _hwrm_send_message_silent(bp, &req, sizeof(req), + HWRM_CMD_TIMEOUT); if (rc) { rc = -EIO; goto hwrm_func_resc_qcaps_exit; @@ -6220,7 +6441,7 @@ int bnxt_hwrm_func_resc_qcaps(struct bnxt *bp, bool all) if (bp->flags & BNXT_FLAG_CHIP_P5) { u16 max_msix = le16_to_cpu(resp->max_msix); - hw_resc->max_irqs = min_t(u16, hw_resc->max_irqs, max_msix); + hw_resc->max_nqs = max_msix; hw_resc->max_hw_ring_grps = hw_resc->max_rx_rings; } @@ -6442,6 +6663,13 @@ static int bnxt_hwrm_ver_get(struct bnxt *bp) 
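With RoCE enabled, bnxt_alloc_ctx_mem() above sizes the backing store far beyond what one 4K page of PTEs can map, which is why bnxt_alloc_ctx_pg_tbls() grows a second indirection level. A back-of-envelope check using the constants from this patch (the entry size is a plausible placeholder; the real one comes from the backing-store qcaps response):

#include <stdio.h>

#define BNXT_PAGE_SIZE	4096
#define MAX_CTX_PAGES	(BNXT_PAGE_SIZE / 8)	/* 512 PTEs per page */

int main(void)
{
	unsigned int extra_qps = 65536;		/* from the RoCE path above */
	unsigned int qp_entry_size = 256;	/* placeholder */
	unsigned int mem_size = extra_qps * qp_entry_size;
	unsigned int nr_pages =
		(mem_size + BNXT_PAGE_SIZE - 1) / BNXT_PAGE_SIZE;

	printf("QP ctx: %u bytes -> %u pages\n", mem_size, nr_pages);
	/* 4096 pages exceed MAX_CTX_PAGES (512), so depth 2 is needed:
	 * DIV_ROUND_UP(4096, 512) = 8 second-level page tables.
	 */
	printf("level-2 tables: %u\n",
	       (nr_pages + MAX_CTX_PAGES - 1) / MAX_CTX_PAGES);
	return 0;
}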
(dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED)) bp->fw_cap |= BNXT_FW_CAP_SHORT_CMD; + if (dev_caps_cfg & VER_GET_RESP_DEV_CAPS_CFG_KONG_MB_CHNL_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_KONG_MB_CHNL; + + if (dev_caps_cfg & + VER_GET_RESP_DEV_CAPS_CFG_FLOW_HANDLE_64BIT_SUPPORTED) + bp->fw_cap |= BNXT_FW_CAP_OVS_64BIT_HANDLE; + hwrm_ver_get_exit: mutex_unlock(&bp->hwrm_cmd_lock); return rc; @@ -6488,6 +6716,7 @@ static int bnxt_hwrm_port_qstats(struct bnxt *bp) static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) { struct hwrm_port_qstats_ext_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_queue_pri2cos_qcfg_input req2 = {0}; struct hwrm_port_qstats_ext_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; int rc; @@ -6510,6 +6739,34 @@ static int bnxt_hwrm_port_qstats_ext(struct bnxt *bp) bp->fw_rx_stats_ext_size = 0; bp->fw_tx_stats_ext_size = 0; } + if (bp->fw_tx_stats_ext_size <= + offsetof(struct tx_port_stats_ext, pfc_pri0_tx_duration_us) / 8) { + mutex_unlock(&bp->hwrm_cmd_lock); + bp->pri2cos_valid = 0; + return rc; + } + + bnxt_hwrm_cmd_hdr_init(bp, &req2, HWRM_QUEUE_PRI2COS_QCFG, -1, -1); + req2.flags = cpu_to_le32(QUEUE_PRI2COS_QCFG_REQ_FLAGS_IVLAN); + + rc = _hwrm_send_message(bp, &req2, sizeof(req2), HWRM_CMD_TIMEOUT); + if (!rc) { + struct hwrm_queue_pri2cos_qcfg_output *resp2; + u8 *pri2cos; + int i, j; + + resp2 = bp->hwrm_cmd_resp_addr; + pri2cos = &resp2->pri0_cos_queue_id; + for (i = 0; i < 8; i++) { + u8 queue_id = pri2cos[i]; + + for (j = 0; j < bp->max_q; j++) { + if (bp->q_ids[j] == queue_id) + bp->pri2cos[i] = j; + } + } + bp->pri2cos_valid = 1; + } mutex_unlock(&bp->hwrm_cmd_lock); return rc; } @@ -7034,17 +7291,12 @@ unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp) return bp->hw_resc.max_stat_ctxs; } -void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max) -{ - bp->hw_resc.max_stat_ctxs = max; -} - unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp) { return bp->hw_resc.max_cp_rings; } -unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) +static unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp) { unsigned int cp = bp->hw_resc.max_cp_rings; @@ -7058,6 +7310,9 @@ static unsigned int bnxt_get_max_func_irqs(struct bnxt *bp) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; + if (bp->flags & BNXT_FLAG_CHIP_P5) + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_nqs); + return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings); } @@ -7066,6 +7321,26 @@ static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs) bp->hw_resc.max_irqs = max_irqs; } +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp) +{ + unsigned int cp; + + cp = bnxt_get_max_func_cp_rings_for_en(bp); + if (bp->flags & BNXT_FLAG_CHIP_P5) + return cp - bp->rx_nr_rings - bp->tx_nr_rings; + else + return cp - bp->cp_nr_rings; +} + +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp) +{ + unsigned int stat; + + stat = bnxt_get_max_func_stat_ctxs(bp) - bnxt_get_ulp_stat_ctxs(bp); + stat -= bp->cp_nr_rings; + return stat; +} + int bnxt_get_avail_msix(struct bnxt *bp, int num) { int max_cp = bnxt_get_max_func_cp_rings(bp); @@ -7203,23 +7478,26 @@ static void bnxt_clear_int_mode(struct bnxt *bp) int bnxt_reserve_rings(struct bnxt *bp) { int tcs = netdev_get_num_tc(bp->dev); + bool reinit_irq = false; int rc; if (!bnxt_need_reserve_rings(bp)) return 0; - rc = __bnxt_reserve_rings(bp); - if (rc) { - netdev_err(bp->dev, "ring reservation failure rc: %d\n", rc); - return rc; - } if (BNXT_NEW_RM(bp) && 
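The HWRM_QUEUE_PRI2COS_QCFG hunk above builds bp->pri2cos[] by reverse-mapping each priority's hardware CoS queue id onto its index within the function's own bp->q_ids[] table; the per-priority ethtool counters later index the extended-stats block with that value. A reduced stand-alone model of the reverse lookup (the firmware answers are hypothetical):

#include <stdio.h>

int main(void)
{
	unsigned char pri0_cos_queue_id[8] = { 0, 0, 0, 4, 4, 0, 0, 0 };
	unsigned char q_ids[2] = { 0, 4 };	/* bp->q_ids equivalent */
	unsigned char pri2cos[8] = { 0 };
	int i, j;

	for (i = 0; i < 8; i++)
		for (j = 0; j < 2; j++)
			if (q_ids[j] == pri0_cos_queue_id[i])
				pri2cos[i] = j;	/* queue id -> queue index */

	for (i = 0; i < 8; i++)
		printf("pri%d -> CoS queue index %u\n", i, pri2cos[i]);
	return 0;
}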
(bnxt_get_num_msix(bp) != bp->total_irqs)) { bnxt_ulp_irq_stop(bp); bnxt_clear_int_mode(bp); - rc = bnxt_init_int_mode(bp); + reinit_irq = true; + } + rc = __bnxt_reserve_rings(bp); + if (reinit_irq) { + if (!rc) + rc = bnxt_init_int_mode(bp); bnxt_ulp_irq_restart(bp, rc); - if (rc) - return rc; + } + if (rc) { + netdev_err(bp->dev, "ring reservation/IRQ init failure rc: %d\n", rc); + return rc; } if (tcs && (bp->tx_nr_rings_per_tc * tcs != bp->tx_nr_rings)) { netdev_err(bp->dev, "tx ring reservation failure\n"); @@ -7227,7 +7505,6 @@ int bnxt_reserve_rings(struct bnxt *bp) bp->tx_nr_rings_per_tc = bp->tx_nr_rings; return -ENOMEM; } - bp->num_stat_ctxs = bp->cp_nr_rings; return 0; } @@ -7821,6 +8098,7 @@ static int bnxt_hwrm_if_change(struct bnxt *bp, bool up) rc = bnxt_hwrm_func_resc_qcaps(bp, true); hw_resc->resv_cp_rings = 0; + hw_resc->resv_stat_ctxs = 0; hw_resc->resv_irqs = 0; hw_resc->resv_tx_rings = 0; hw_resc->resv_rx_rings = 0; @@ -8260,6 +8538,9 @@ static bool bnxt_drv_busy(struct bnxt *bp) test_bit(BNXT_STATE_READ_STATS, &bp->state)); } +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats); + static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) { @@ -8285,6 +8566,9 @@ static void __bnxt_close_nic(struct bnxt *bp, bool irq_re_init, del_timer_sync(&bp->timer); bnxt_free_skbs(bp); + /* Save ring stats before shutdown */ + if (bp->bnapi) + bnxt_get_ring_stats(bp, &bp->net_stats_prev); if (irq_re_init) { bnxt_free_irq(bp); bnxt_del_napi(bp); @@ -8346,23 +8630,12 @@ static int bnxt_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) return -EOPNOTSUPP; } -static void -bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +static void bnxt_get_ring_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) { - u32 i; - struct bnxt *bp = netdev_priv(dev); + int i; - set_bit(BNXT_STATE_READ_STATS, &bp->state); - /* Make sure bnxt_close_nic() sees that we are reading stats before - * we check the BNXT_STATE_OPEN flag. - */ - smp_mb__after_atomic(); - if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { - clear_bit(BNXT_STATE_READ_STATS, &bp->state); - return; - } - /* TODO check if we need to synchronize with bnxt_close path */ for (i = 0; i < bp->cp_nr_rings; i++) { struct bnxt_napi *bnapi = bp->bnapi[i]; struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring; @@ -8391,6 +8664,40 @@ bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->tx_dropped += le64_to_cpu(hw_stats->tx_drop_pkts); } +} + +static void bnxt_add_prev_stats(struct bnxt *bp, + struct rtnl_link_stats64 *stats) +{ + struct rtnl_link_stats64 *prev_stats = &bp->net_stats_prev; + + stats->rx_packets += prev_stats->rx_packets; + stats->tx_packets += prev_stats->tx_packets; + stats->rx_bytes += prev_stats->rx_bytes; + stats->tx_bytes += prev_stats->tx_bytes; + stats->rx_missed_errors += prev_stats->rx_missed_errors; + stats->multicast += prev_stats->multicast; + stats->tx_dropped += prev_stats->tx_dropped; +} + +static void +bnxt_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) +{ + struct bnxt *bp = netdev_priv(dev); + + set_bit(BNXT_STATE_READ_STATS, &bp->state); + /* Make sure bnxt_close_nic() sees that we are reading stats before + * we check the BNXT_STATE_OPEN flag. 
+ */ + smp_mb__after_atomic(); + if (!test_bit(BNXT_STATE_OPEN, &bp->state)) { + clear_bit(BNXT_STATE_READ_STATS, &bp->state); + *stats = bp->net_stats_prev; + return; + } + + bnxt_get_ring_stats(bp, stats); + bnxt_add_prev_stats(bp, stats); if (bp->flags & BNXT_FLAG_PORT_STATS) { struct rx_port_stats *rx = bp->hw_rx_port_stats; @@ -8626,12 +8933,12 @@ static bool bnxt_rfs_capable(struct bnxt *bp) if (vnics == bp->hw_resc.resv_vnics) return true; - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, vnics); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, vnics); if (vnics <= bp->hw_resc.resv_vnics) return true; netdev_warn(bp->dev, "Unable to reserve resources to support NTUPLE filters.\n"); - bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 1); + bnxt_hwrm_reserve_rings(bp, 0, 0, 0, 0, 0, 1); return false; #else return false; @@ -9042,7 +9349,7 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, int tx_xdp) { int max_rx, max_tx, tx_sets = 1; - int tx_rings_needed; + int tx_rings_needed, stats; int rx_rings = rx; int cp, vnics, rc; @@ -9067,10 +9374,13 @@ int bnxt_check_rings(struct bnxt *bp, int tx, int rx, bool sh, int tcs, if (bp->flags & BNXT_FLAG_AGG_RINGS) rx_rings <<= 1; cp = sh ? max_t(int, tx_rings_needed, rx) : tx_rings_needed + rx; - if (BNXT_NEW_RM(bp)) + stats = cp; + if (BNXT_NEW_RM(bp)) { cp += bnxt_get_ulp_msix_num(bp); + stats += bnxt_get_ulp_stat_ctxs(bp); + } return bnxt_hwrm_check_rings(bp, tx_rings_needed, rx_rings, rx, cp, - vnics); + stats, vnics); } static void bnxt_unmap_bars(struct bnxt *bp, struct pci_dev *pdev) @@ -9106,7 +9416,7 @@ static void bnxt_init_dflt_coal(struct bnxt *bp) * 1 coal_buf x bufs_per_record = 1 completion record. */ coal = &bp->rx_coal; - coal->coal_ticks = 14; + coal->coal_ticks = 10; coal->coal_bufs = 30; coal->coal_ticks_irq = 1; coal->coal_bufs_irq = 2; @@ -9294,7 +9604,6 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc) bp->tx_nr_rings += bp->tx_nr_rings_xdp; bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; - bp->num_stat_ctxs = bp->cp_nr_rings; if (netif_running(bp->dev)) return bnxt_open_nic(bp, true, false); @@ -9617,7 +9926,7 @@ static int bnxt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, } static int bnxt_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags) + u16 flags, struct netlink_ext_ack *extack) { struct bnxt *bp = netdev_priv(dev); struct nlattr *attr, *br_spec; @@ -9760,6 +10069,7 @@ static void bnxt_remove_one(struct pci_dev *pdev) kfree(bp->ctx); bp->ctx = NULL; bnxt_cleanup_pci(bp); + bnxt_free_port_stats(bp); free_netdev(dev); } @@ -9834,7 +10144,7 @@ static void _bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, *max_cp = bnxt_get_max_func_cp_rings_for_en(bp); max_irq = min_t(int, bnxt_get_max_func_irqs(bp) - bnxt_get_ulp_msix_num(bp), - bnxt_get_max_func_stat_ctxs(bp)); + hw_resc->max_stat_ctxs - bnxt_get_ulp_stat_ctxs(bp)); if (!(bp->flags & BNXT_FLAG_CHIP_P5)) *max_cp = min_t(int, *max_cp, max_irq); max_ring_grps = hw_resc->max_hw_ring_grps; @@ -9965,7 +10275,6 @@ static int bnxt_set_dflt_rings(struct bnxt *bp, bool sh) netdev_warn(bp->dev, "2nd rings reservation failed.\n"); bp->tx_nr_rings_per_tc = bp->tx_nr_rings; } - bp->num_stat_ctxs = bp->cp_nr_rings; if (BNXT_CHIP_TYPE_NITRO_A0(bp)) { bp->rx_nr_rings++; bp->cp_nr_rings++; @@ -10099,6 +10408,12 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto init_err_pci_clean; + if (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL) { + rc = bnxt_alloc_kong_hwrm_resources(bp); + if (rc) + bp->fw_cap &= ~BNXT_FW_CAP_KONG_MB_CHNL; + } + if ((bp->fw_cap & BNXT_FW_CAP_SHORT_CMD) || bp->hwrm_max_ext_req_len > BNXT_HWRM_MAX_REQ_LEN) { rc = bnxt_alloc_hwrm_short_cmd_req(bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 3030931ccaf8..a451796deefe 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -567,7 +567,6 @@ struct nqe_cn { #define HWRM_RESP_LEN_MASK 0xffff0000 #define HWRM_RESP_LEN_SFT 16 #define HWRM_RESP_VALID_MASK 0xff000000 -#define HWRM_SEQ_ID_INVALID -1 #define BNXT_HWRM_REQ_MAX_SIZE 128 #define BNXT_HWRM_REQS_PER_PAGE (BNXT_PAGE_SIZE / \ BNXT_HWRM_REQ_MAX_SIZE) @@ -585,6 +584,9 @@ struct nqe_cn { #define HWRM_VALID_BIT_DELAY_USEC 20 +#define BNXT_HWRM_CHNL_CHIMP 0 +#define BNXT_HWRM_CHNL_KONG 1 + #define BNXT_RX_EVENT 1 #define BNXT_AGG_EVENT 2 #define BNXT_TX_EVENT 4 @@ -615,9 +617,12 @@ struct bnxt_sw_rx_agg_bd { struct bnxt_ring_mem_info { int nr_pages; int page_size; - u32 flags; + u16 flags; #define BNXT_RMEM_VALID_PTE_FLAG 1 #define BNXT_RMEM_RING_PTE_FLAG 2 +#define BNXT_RMEM_USE_FULL_PAGE_FLAG 4 + + u16 depth; void **pg_arr; dma_addr_t *dma_arr; @@ -927,6 +932,8 @@ struct bnxt_hw_resc { u16 resv_vnics; u16 min_stat_ctxs; u16 max_stat_ctxs; + u16 resv_stat_ctxs; + u16 max_nqs; u16 max_irqs; u16 resv_irqs; }; @@ -1111,9 +1118,14 @@ struct bnxt_test_info { char string[BNXT_MAX_TEST][ETH_GSTRING_LEN]; }; -#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 -#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 -#define BNXT_CAG_REG_BASE 0x300000 +#define BNXT_GRCPF_REG_CHIMP_COMM 0x0 +#define BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER 0x100 +#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400 +#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014 +#define BNXT_CAG_REG_BASE 0x300000 + +#define BNXT_GRCPF_REG_KONG_COMM 0xA00 +#define BNXT_GRCPF_REG_KONG_COMM_TRIGGER 0xB00 struct 
bnxt_tc_flow_stats { u64 packets; @@ -1181,12 +1193,15 @@ struct bnxt_vf_rep { #define PTU_PTE_NEXT_TO_LAST 0x4UL #define MAX_CTX_PAGES (BNXT_PAGE_SIZE / 8) +#define MAX_CTX_TOTAL_PAGES (MAX_CTX_PAGES * MAX_CTX_PAGES) struct bnxt_ctx_pg_info { u32 entries; + u32 nr_pages; void *ctx_pg_arr[MAX_CTX_PAGES]; dma_addr_t ctx_dma_arr[MAX_CTX_PAGES]; struct bnxt_ring_mem_info ring_mem; + struct bnxt_ctx_pg_info **ctx_pg_tbl; }; struct bnxt_ctx_mem_info { @@ -1222,6 +1237,8 @@ struct bnxt_ctx_mem_info { struct bnxt_ctx_pg_info cq_mem; struct bnxt_ctx_pg_info vnic_mem; struct bnxt_ctx_pg_info stat_mem; + struct bnxt_ctx_pg_info mrav_mem; + struct bnxt_ctx_pg_info tim_mem; struct bnxt_ctx_pg_info *tqm_mem[9]; }; @@ -1416,8 +1433,6 @@ struct bnxt { int cp_nr_pages; int cp_nr_rings; - int num_stat_ctxs; - /* grp_info indexed by completion ring index */ struct bnxt_ring_grp_info *grp_info; struct bnxt_vnic_info *vnic_info; @@ -1457,21 +1472,27 @@ struct bnxt { u32 msg_enable; u32 fw_cap; - #define BNXT_FW_CAP_SHORT_CMD 0x00000001 - #define BNXT_FW_CAP_LLDP_AGENT 0x00000002 - #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 - #define BNXT_FW_CAP_NEW_RM 0x00000008 - #define BNXT_FW_CAP_IF_CHANGE 0x00000010 + #define BNXT_FW_CAP_SHORT_CMD 0x00000001 + #define BNXT_FW_CAP_LLDP_AGENT 0x00000002 + #define BNXT_FW_CAP_DCBX_AGENT 0x00000004 + #define BNXT_FW_CAP_NEW_RM 0x00000008 + #define BNXT_FW_CAP_IF_CHANGE 0x00000010 + #define BNXT_FW_CAP_KONG_MB_CHNL 0x00000080 + #define BNXT_FW_CAP_OVS_64BIT_HANDLE 0x00000400 #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) u32 hwrm_spec_code; u16 hwrm_cmd_seq; - u32 hwrm_intr_seq_id; + u16 hwrm_cmd_kong_seq; + u16 hwrm_intr_seq_id; void *hwrm_short_cmd_req_addr; dma_addr_t hwrm_short_cmd_req_dma_addr; void *hwrm_cmd_resp_addr; dma_addr_t hwrm_cmd_resp_dma_addr; + void *hwrm_cmd_kong_resp_addr; + dma_addr_t hwrm_cmd_kong_resp_dma_addr; + struct rtnl_link_stats64 net_stats_prev; struct rx_port_stats *hw_rx_port_stats; struct tx_port_stats *hw_tx_port_stats; struct rx_port_stats_ext *hw_rx_port_stats_ext; @@ -1483,6 +1504,8 @@ struct bnxt { int hw_port_stats_size; u16 fw_rx_stats_ext_size; u16 fw_tx_stats_ext_size; + u8 pri2cos[8]; + u8 pri2cos_valid; u16 hwrm_max_req_len; u16 hwrm_max_ext_req_len; @@ -1669,6 +1692,66 @@ static inline void bnxt_db_write(struct bnxt *bp, struct bnxt_db_info *db, } } +static inline bool bnxt_cfa_hwrm_message(u16 req_type) +{ + switch (req_type) { + case HWRM_CFA_ENCAP_RECORD_ALLOC: + case HWRM_CFA_ENCAP_RECORD_FREE: + case HWRM_CFA_DECAP_FILTER_ALLOC: + case HWRM_CFA_DECAP_FILTER_FREE: + case HWRM_CFA_NTUPLE_FILTER_ALLOC: + case HWRM_CFA_NTUPLE_FILTER_FREE: + case HWRM_CFA_NTUPLE_FILTER_CFG: + case HWRM_CFA_EM_FLOW_ALLOC: + case HWRM_CFA_EM_FLOW_FREE: + case HWRM_CFA_EM_FLOW_CFG: + case HWRM_CFA_FLOW_ALLOC: + case HWRM_CFA_FLOW_FREE: + case HWRM_CFA_FLOW_INFO: + case HWRM_CFA_FLOW_FLUSH: + case HWRM_CFA_FLOW_STATS: + case HWRM_CFA_METER_PROFILE_ALLOC: + case HWRM_CFA_METER_PROFILE_FREE: + case HWRM_CFA_METER_PROFILE_CFG: + case HWRM_CFA_METER_INSTANCE_ALLOC: + case HWRM_CFA_METER_INSTANCE_FREE: + return true; + default: + return false; + } +} + +static inline bool bnxt_kong_hwrm_message(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + bnxt_cfa_hwrm_message(le16_to_cpu(req->req_type))); +} + +static inline bool bnxt_hwrm_kong_chnl(struct bnxt *bp, struct input *req) +{ + return (bp->fw_cap & BNXT_FW_CAP_KONG_MB_CHNL && + req->resp_addr == cpu_to_le64(bp->hwrm_cmd_kong_resp_dma_addr)); +} + 
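Taken together, the helpers above steer every CFA (flow offload) command to the secondary Kong mailbox whenever the firmware advertises BNXT_FW_CAP_KONG_MB_CHNL, while all other commands keep using the primary ChiMP channel; each channel owns its own response page, BAR window, doorbell and sequence-id counter. A condensed sketch of the dispatch decision as the send path applies it (using bnxt_get_hwrm_seq_id(), defined just below; the function name here is illustrative):

/* Sketch: pick channel-specific resources for one request. */
static void sketch_pick_channel(struct bnxt *bp, struct input *req)
{
	u16 dst = BNXT_HWRM_CHNL_CHIMP;
	u32 doorbell = BNXT_GRCPF_REG_CHIMP_COMM_TRIGGER;
	void *resp = bp->hwrm_cmd_resp_addr;

	if (bnxt_kong_hwrm_message(bp, req)) {
		dst = BNXT_HWRM_CHNL_KONG;
		doorbell = BNXT_GRCPF_REG_KONG_COMM_TRIGGER;
		resp = bp->hwrm_cmd_kong_resp_addr;
	}
	req->seq_id = cpu_to_le16(bnxt_get_hwrm_seq_id(bp, dst));
	/* ... copy the request into the channel's BAR window and ring
	 * the channel's doorbell; the response lands in *resp ...
	 */
	(void)doorbell;
	(void)resp;
}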
+static inline void *bnxt_get_hwrm_resp_addr(struct bnxt *bp, void *req) +{ + if (bnxt_hwrm_kong_chnl(bp, (struct input *)req)) + return bp->hwrm_cmd_kong_resp_addr; + else + return bp->hwrm_cmd_resp_addr; +} + +static inline u16 bnxt_get_hwrm_seq_id(struct bnxt *bp, u16 dst) +{ + u16 seq_id; + + if (dst == BNXT_HWRM_CHNL_CHIMP) + seq_id = bp->hwrm_cmd_seq++; + else + seq_id = bp->hwrm_cmd_kong_seq++; + return seq_id; +} + extern const u16 bnxt_lhint_arr[]; int bnxt_alloc_rx_data(struct bnxt *bp, struct bnxt_rx_ring_info *rxr, @@ -1686,11 +1769,12 @@ int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap, int bmap_size); int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id); int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings); +int bnxt_nq_rings_in_use(struct bnxt *bp); int bnxt_hwrm_set_coal(struct bnxt *); unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp); -void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max); +unsigned int bnxt_get_avail_stat_ctxs_for_en(struct bnxt *bp); unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp); -unsigned int bnxt_get_max_func_cp_rings_for_en(struct bnxt *bp); +unsigned int bnxt_get_avail_cp_rings_for_en(struct bnxt *bp); int bnxt_get_avail_msix(struct bnxt *bp, int num); int bnxt_reserve_rings(struct bnxt *bp); void bnxt_tx_disable(struct bnxt *bp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index a85d2be986af..15c7041e937b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -471,7 +471,10 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) if (total_ets_bw > 100) return -EINVAL; - *tc = max_tc + 1; + if (max_tc >= bp->max_tc) + *tc = bp->max_tc; + else + *tc = max_tc + 1; return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index 6b51f4de6017..adabbe94a259 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -207,6 +207,34 @@ reset_coalesce: BNXT_TX_STATS_EXT_COS_ENTRY(6), \ BNXT_TX_STATS_EXT_COS_ENTRY(7) \ +#define BNXT_RX_STATS_PRI_ENTRY(counter, n) \ + { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \ + __stringify(counter##_pri##n) } + +#define BNXT_TX_STATS_PRI_ENTRY(counter, n) \ + { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \ + __stringify(counter##_pri##n) } + +#define BNXT_RX_STATS_PRI_ENTRIES(counter) \ + BNXT_RX_STATS_PRI_ENTRY(counter, 0), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 1), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 2), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 3), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 4), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 5), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 6), \ + BNXT_RX_STATS_PRI_ENTRY(counter, 7) + +#define BNXT_TX_STATS_PRI_ENTRIES(counter) \ + BNXT_TX_STATS_PRI_ENTRY(counter, 0), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 1), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 2), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 3), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 4), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 5), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 6), \ + BNXT_TX_STATS_PRI_ENTRY(counter, 7) + enum { RX_TOTAL_DISCARDS, TX_TOTAL_DISCARDS, @@ -327,8 +355,41 @@ static const struct { BNXT_TX_STATS_EXT_PFC_ENTRIES, }; +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_rx_bytes_pri_arr[] = { + BNXT_RX_STATS_PRI_ENTRIES(rx_bytes), +}; + +static const struct { + long base_off; + 
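Each *_PRI_ENTRY above deliberately records only the offset of the counter's cos0 slot; at dump time (further below) the real slot is resolved as base_off + pri2cos[n], translating the priority to its CoS queue index first. In effect (sketch, kernel types assumed):

/* Sketch: resolve one per-priority counter at dump time. */
static u64 sketch_pri_counter(const __le64 *port_stats_ext,
			      long cos0_off, u8 cos_idx)
{
	/* Offsets count 64-bit words, so pointer arithmetic suffices. */
	return le64_to_cpu(*(port_stats_ext + cos0_off + cos_idx));
}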
char string[ETH_GSTRING_LEN]; +} bnxt_rx_pkts_pri_arr[] = { + BNXT_RX_STATS_PRI_ENTRIES(rx_packets), +}; + +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_tx_bytes_pri_arr[] = { + BNXT_TX_STATS_PRI_ENTRIES(tx_bytes), +}; + +static const struct { + long base_off; + char string[ETH_GSTRING_LEN]; +} bnxt_tx_pkts_pri_arr[] = { + BNXT_TX_STATS_PRI_ENTRIES(tx_packets), +}; + #define BNXT_NUM_SW_FUNC_STATS ARRAY_SIZE(bnxt_sw_func_stats) #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr) +#define BNXT_NUM_STATS_PRI \ + (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \ + ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \ + ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \ + ARRAY_SIZE(bnxt_tx_pkts_pri_arr)) static int bnxt_get_num_stats(struct bnxt *bp) { @@ -339,9 +400,12 @@ static int bnxt_get_num_stats(struct bnxt *bp) if (bp->flags & BNXT_FLAG_PORT_STATS) num_stats += BNXT_NUM_PORT_STATS; - if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) + if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) { num_stats += bp->fw_rx_stats_ext_size + bp->fw_tx_stats_ext_size; + if (bp->pri2cos_valid) + num_stats += BNXT_NUM_STATS_PRI; + } return num_stats; } @@ -369,8 +433,10 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, struct bnxt *bp = netdev_priv(dev); u32 stat_fields = sizeof(struct ctx_hw_stats) / 8; - if (!bp->bnapi) - return; + if (!bp->bnapi) { + j += BNXT_NUM_STATS * bp->cp_nr_rings + BNXT_NUM_SW_FUNC_STATS; + goto skip_ring_stats; + } for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) bnxt_sw_func_stats[i].counter = 0; @@ -395,6 +461,7 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++) buf[j] = bnxt_sw_func_stats[i].counter; +skip_ring_stats: if (bp->flags & BNXT_FLAG_PORT_STATS) { __le64 *port_stats = (__le64 *)bp->hw_rx_port_stats; @@ -415,6 +482,32 @@ static void bnxt_get_ethtool_stats(struct net_device *dev, buf[j] = le64_to_cpu(*(tx_port_stats_ext + bnxt_tx_port_stats_ext_arr[i].offset)); } + if (bp->pri2cos_valid) { + for (i = 0; i < 8; i++, j++) { + long n = bnxt_rx_bytes_pri_arr[i].base_off + + bp->pri2cos[i]; + + buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); + } + for (i = 0; i < 8; i++, j++) { + long n = bnxt_rx_pkts_pri_arr[i].base_off + + bp->pri2cos[i]; + + buf[j] = le64_to_cpu(*(rx_port_stats_ext + n)); + } + for (i = 0; i < 8; i++, j++) { + long n = bnxt_tx_bytes_pri_arr[i].base_off + + bp->pri2cos[i]; + + buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); + } + for (i = 0; i < 8; i++, j++) { + long n = bnxt_tx_pkts_pri_arr[i].base_off + + bp->pri2cos[i]; + + buf[j] = le64_to_cpu(*(tx_port_stats_ext + n)); + } + } } } @@ -493,6 +586,28 @@ static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf) bnxt_tx_port_stats_ext_arr[i].string); buf += ETH_GSTRING_LEN; } + if (bp->pri2cos_valid) { + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_rx_bytes_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_rx_pkts_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_tx_bytes_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + for (i = 0; i < 8; i++) { + strcpy(buf, + bnxt_tx_pkts_pri_arr[i].string); + buf += ETH_GSTRING_LEN; + } + } } break; case ETH_SS_TEST: @@ -663,8 +778,6 @@ static int bnxt_set_channels(struct net_device *dev, bp->cp_nr_rings = sh ? 
max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) : bp->tx_nr_rings + bp->rx_nr_rings; - bp->num_stat_ctxs = bp->cp_nr_rings; - /* After changing number of rx channels, update NTUPLE feature. */ netdev_update_features(dev); if (netif_running(dev)) { @@ -1526,14 +1639,22 @@ static int bnxt_flash_nvram(struct net_device *dev, rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT); dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle); + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_info(dev, + "PF does not have admin privileges to flash the device\n"); + rc = -EACCES; + } else if (rc) { + rc = -EIO; + } return rc; } static int bnxt_firmware_reset(struct net_device *dev, u16 dir_type) { - struct bnxt *bp = netdev_priv(dev); struct hwrm_fw_reset_input req = {0}; + struct bnxt *bp = netdev_priv(dev); + int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1); @@ -1573,7 +1694,15 @@ static int bnxt_firmware_reset(struct net_device *dev, return -EINVAL; } - return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_info(dev, + "PF does not have admin privileges to reset the device\n"); + rc = -EACCES; + } else if (rc) { + rc = -EIO; + } + return rc; } static int bnxt_flash_firmware(struct net_device *dev, @@ -1780,9 +1909,9 @@ static int bnxt_flash_package_from_file(struct net_device *dev, struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_nvm_install_update_input install = {0}; const struct firmware *fw; + int rc, hwrm_err = 0; u32 item_len; u16 index; - int rc; bnxt_hwrm_fw_set_time(bp); @@ -1825,15 +1954,16 @@ static int bnxt_flash_package_from_file(struct net_device *dev, memcpy(kmem, fw->data, fw->size); modify.host_src_addr = cpu_to_le64(dma_handle); - rc = hwrm_send_message(bp, &modify, sizeof(modify), - FLASH_PACKAGE_TIMEOUT); + hwrm_err = hwrm_send_message(bp, &modify, + sizeof(modify), + FLASH_PACKAGE_TIMEOUT); dma_free_coherent(&bp->pdev->dev, fw->size, kmem, dma_handle); } } release_firmware(fw); - if (rc) - return rc; + if (rc || hwrm_err) + goto err_exit; if ((install_type & 0xffff) == 0) install_type >>= 16; @@ -1841,12 +1971,10 @@ static int bnxt_flash_package_from_file(struct net_device *dev, install.install_type = cpu_to_le32(install_type); mutex_lock(&bp->hwrm_cmd_lock); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) { - rc = -EOPNOTSUPP; + hwrm_err = _hwrm_send_message(bp, &install, sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (hwrm_err) goto flash_pkg_exit; - } if (resp->error_code) { u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err; @@ -1854,12 +1982,11 @@ static int bnxt_flash_package_from_file(struct net_device *dev, if (error_code == NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) { install.flags |= cpu_to_le16( NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG); - rc = _hwrm_send_message(bp, &install, sizeof(install), - INSTALL_PACKAGE_TIMEOUT); - if (rc) { - rc = -EOPNOTSUPP; + hwrm_err = _hwrm_send_message(bp, &install, + sizeof(install), + INSTALL_PACKAGE_TIMEOUT); + if (hwrm_err) goto flash_pkg_exit; - } } } @@ -1870,6 +1997,14 @@ static int bnxt_flash_package_from_file(struct net_device *dev, } flash_pkg_exit: mutex_unlock(&bp->hwrm_cmd_lock); +err_exit: + if (hwrm_err == HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED) { + netdev_info(dev, + "PF does not have admin privileges to flash the device\n"); + rc = -EACCES; + } else 
if (hwrm_err) { + rc = -EOPNOTSUPP; + } return rc; } @@ -2450,17 +2585,37 @@ static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable) return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); } +static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds) +{ + struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + struct hwrm_port_phy_qcaps_input req = {0}; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1); + mutex_lock(&bp->hwrm_cmd_lock); + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) + *force_speeds = le16_to_cpu(resp->supported_speeds_force_mode); + + mutex_unlock(&bp->hwrm_cmd_lock); + return rc; +} + static int bnxt_disable_an_for_lpbk(struct bnxt *bp, struct hwrm_port_phy_cfg_input *req) { struct bnxt_link_info *link_info = &bp->link_info; - u16 fw_advertising = link_info->advertising; + u16 fw_advertising; u16 fw_speed; int rc; if (!link_info->autoneg) return 0; + rc = bnxt_query_force_speeds(bp, &fw_advertising); + if (rc) + return rc; + fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB; if (netif_carrier_ok(bp->dev)) fw_speed = bp->link_info.link_speed; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h index 5dd086059568..f1aaac8e6268 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h @@ -194,6 +194,8 @@ struct cmd_nums { #define HWRM_STAT_CTX_QUERY 0xb2UL #define HWRM_STAT_CTX_CLR_STATS 0xb3UL #define HWRM_PORT_QSTATS_EXT 0xb4UL + #define HWRM_PORT_PHY_MDIO_WRITE 0xb5UL + #define HWRM_PORT_PHY_MDIO_READ 0xb6UL #define HWRM_FW_RESET 0xc0UL #define HWRM_FW_QSTATUS 0xc1UL #define HWRM_FW_HEALTH_CHECK 0xc2UL @@ -213,6 +215,7 @@ struct cmd_nums { #define HWRM_WOL_FILTER_FREE 0xf1UL #define HWRM_WOL_FILTER_QCFG 0xf2UL #define HWRM_WOL_REASON_QCFG 0xf3UL + #define HWRM_CFA_METER_QCAPS 0xf4UL #define HWRM_CFA_METER_PROFILE_ALLOC 0xf5UL #define HWRM_CFA_METER_PROFILE_FREE 0xf6UL #define HWRM_CFA_METER_PROFILE_CFG 0xf7UL @@ -239,6 +242,24 @@ struct cmd_nums { #define HWRM_FW_IPC_MSG 0x110UL #define HWRM_CFA_REDIRECT_TUNNEL_TYPE_INFO 0x111UL #define HWRM_CFA_REDIRECT_QUERY_TUNNEL_TYPE 0x112UL + #define HWRM_CFA_FLOW_AGING_TIMER_RESET 0x113UL + #define HWRM_CFA_FLOW_AGING_CFG 0x114UL + #define HWRM_CFA_FLOW_AGING_QCFG 0x115UL + #define HWRM_CFA_FLOW_AGING_QCAPS 0x116UL + #define HWRM_CFA_CTX_MEM_RGTR 0x117UL + #define HWRM_CFA_CTX_MEM_UNRGTR 0x118UL + #define HWRM_CFA_CTX_MEM_QCTX 0x119UL + #define HWRM_CFA_CTX_MEM_QCAPS 0x11aUL + #define HWRM_CFA_COUNTER_QCAPS 0x11bUL + #define HWRM_CFA_COUNTER_CFG 0x11cUL + #define HWRM_CFA_COUNTER_QCFG 0x11dUL + #define HWRM_CFA_COUNTER_QSTATS 0x11eUL + #define HWRM_CFA_TCP_FLAG_PROCESS_QCFG 0x11fUL + #define HWRM_CFA_EEM_QCAPS 0x120UL + #define HWRM_CFA_EEM_CFG 0x121UL + #define HWRM_CFA_EEM_QCFG 0x122UL + #define HWRM_CFA_EEM_OP 0x123UL + #define HWRM_CFA_ADV_FLOW_MGNT_QCAPS 0x124UL #define HWRM_ENGINE_CKV_HELLO 0x12dUL #define HWRM_ENGINE_CKV_STATUS 0x12eUL #define HWRM_ENGINE_CKV_CKEK_ADD 0x12fUL @@ -335,6 +356,8 @@ struct ret_codes { #define HWRM_ERR_CODE_UNSUPPORTED_TLV 0x7UL #define HWRM_ERR_CODE_NO_BUFFER 0x8UL #define HWRM_ERR_CODE_UNSUPPORTED_OPTION_ERR 0x9UL + #define HWRM_ERR_CODE_HOT_RESET_PROGRESS 0xaUL + #define HWRM_ERR_CODE_HOT_RESET_FAIL 0xbUL #define HWRM_ERR_CODE_HWRM_ERROR 0xfUL #define HWRM_ERR_CODE_TLV_ENCAPSULATED_RESPONSE 0x8000UL #define HWRM_ERR_CODE_UNKNOWN_ERR 0xfffeUL @@ -363,8 +386,8 @@ struct hwrm_err_output { 
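/* Annotation: the defines below bump the HWRM interface version from
 * 1.10.0.3 to 1.10.0.33, matching the new commands (e.g. the
 * HWRM_PORT_PHY_MDIO_* and HWRM_CFA_EEM_* opcodes) and async events
 * added elsewhere in this spec update.
 */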
#define HWRM_VERSION_MAJOR 1 #define HWRM_VERSION_MINOR 10 #define HWRM_VERSION_UPDATE 0 -#define HWRM_VERSION_RSVD 3 -#define HWRM_VERSION_STR "1.10.0.3" +#define HWRM_VERSION_RSVD 33 +#define HWRM_VERSION_STR "1.10.0.33" /* hwrm_ver_get_input (size:192b/24B) */ struct hwrm_ver_get_input { @@ -411,6 +434,10 @@ struct hwrm_ver_get_output { #define VER_GET_RESP_DEV_CAPS_CFG_L2_FILTER_TYPES_ROCE_OR_L2_SUPPORTED 0x40UL #define VER_GET_RESP_DEV_CAPS_CFG_VIRTIO_VSWITCH_OFFLOAD_SUPPORTED 0x80UL #define VER_GET_RESP_DEV_CAPS_CFG_TRUSTED_VF_SUPPORTED 0x100UL + #define VER_GET_RESP_DEV_CAPS_CFG_FLOW_AGING_SUPPORTED 0x200UL + #define VER_GET_RESP_DEV_CAPS_CFG_ADV_FLOW_COUNTERS_SUPPORTED 0x400UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_EEM_SUPPORTED 0x800UL + #define VER_GET_RESP_DEV_CAPS_CFG_CFA_ADV_FLOW_MGNT_SUPPORTED 0x1000UL u8 roce_fw_maj_8b; u8 roce_fw_min_8b; u8 roce_fw_bld_8b; @@ -465,14 +492,27 @@ struct hwrm_ver_get_output { /* eject_cmpl (size:128b/16B) */ struct eject_cmpl { __le16 type; - #define EJECT_CMPL_TYPE_MASK 0x3fUL - #define EJECT_CMPL_TYPE_SFT 0 - #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL - #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_TYPE_MASK 0x3fUL + #define EJECT_CMPL_TYPE_SFT 0 + #define EJECT_CMPL_TYPE_STAT_EJECT 0x1aUL + #define EJECT_CMPL_TYPE_LAST EJECT_CMPL_TYPE_STAT_EJECT + #define EJECT_CMPL_FLAGS_MASK 0xffc0UL + #define EJECT_CMPL_FLAGS_SFT 6 + #define EJECT_CMPL_FLAGS_ERROR 0x40UL __le16 len; __le32 opaque; - __le32 v; - #define EJECT_CMPL_V 0x1UL + __le16 v; + #define EJECT_CMPL_V 0x1UL + #define EJECT_CMPL_ERRORS_MASK 0xfffeUL + #define EJECT_CMPL_ERRORS_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_MASK 0xeUL + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER (0x0UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT (0x1UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT (0x3UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH (0x5UL << 1) + #define EJECT_CMPL_ERRORS_BUFFER_ERROR_LAST EJECT_CMPL_ERRORS_BUFFER_ERROR_FLUSH + __le16 reserved16; __le32 unused_2; }; @@ -552,6 +592,10 @@ struct hwrm_async_event_cmpl { #define ASYNC_EVENT_CMPL_EVENT_ID_LLFC_PFC_CHANGE 0x34UL #define ASYNC_EVENT_CMPL_EVENT_ID_DEFAULT_VNIC_CHANGE 0x35UL #define ASYNC_EVENT_CMPL_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION 0x37UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG 0xfeUL #define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR 0xffUL #define ASYNC_EVENT_CMPL_EVENT_ID_LAST ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR __le32 event_data2; @@ -647,6 +691,39 @@ struct hwrm_async_event_cmpl_link_speed_cfg_change { #define ASYNC_EVENT_CMPL_LINK_SPEED_CFG_CHANGE_EVENT_DATA1_ILLEGAL_LINK_SPEED_CFG 0x20000UL }; +/* hwrm_async_event_cmpl_reset_notify (size:128b/16B) */ +struct hwrm_async_event_cmpl_reset_notify { + __le16 type; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY 0x8UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_ID_RESET_NOTIFY + __le32 event_data2; + u8 opaque_v; + #define 
ASYNC_EVENT_CMPL_RESET_NOTIFY_V 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_MASK 0xffUL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_SFT 0 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_STOP_TX_QUEUE 0x1UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN 0x2UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DRIVER_ACTION_DRIVER_IFDOWN + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MASK 0xff00UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_SFT 8 + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_MANAGEMENT_RESET_REQUEST (0x1UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_FATAL (0x2UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL (0x3UL << 8) + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_LAST ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_REASON_CODE_FW_EXCEPTION_NON_FATAL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_MASK 0xffff0000UL + #define ASYNC_EVENT_CMPL_RESET_NOTIFY_EVENT_DATA1_DELAY_IN_100MS_TICKS_SFT 16 +}; + /* hwrm_async_event_cmpl_vf_cfg_change (size:128b/16B) */ struct hwrm_async_event_cmpl_vf_cfg_change { __le16 type; @@ -672,6 +749,74 @@ struct hwrm_async_event_cmpl_vf_cfg_change { #define ASYNC_EVENT_CMPL_VF_CFG_CHANGE_EVENT_DATA1_TRUSTED_VF_CFG_CHANGE 0x10UL }; +/* hwrm_async_event_cmpl_hw_flow_aged (size:128b/16B) */ +struct hwrm_async_event_cmpl_hw_flow_aged { + __le16 type; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED 0x36UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_ID_HW_FLOW_AGED + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_V 0x1UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_MASK 0x7fffffffUL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_ID_SFT 0 + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION 0x80000000UL + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_RX (0x0UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX (0x1UL << 31) + #define ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_LAST ASYNC_EVENT_CMPL_HW_FLOW_AGED_EVENT_DATA1_FLOW_DIRECTION_TX +}; + +/* hwrm_async_event_cmpl_eem_cache_flush_req (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_req { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_TYPE_HWRM_ASYNC_EVENT + __le16 
event_id; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ 0x38UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_EVENT_ID_EEM_CACHE_FLUSH_REQ + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_REQ_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; +}; + +/* hwrm_async_event_cmpl_eem_cache_flush_done (size:128b/16B) */ +struct hwrm_async_event_cmpl_eem_cache_flush_done { + __le16 type; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_MASK 0x3fUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_SFT 0 + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT 0x2eUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_TYPE_HWRM_ASYNC_EVENT + __le16 event_id; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE 0x39UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_LAST ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_ID_EEM_CACHE_FLUSH_DONE + __le32 event_data2; + u8 opaque_v; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_V 0x1UL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_MASK 0xfeUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_OPAQUE_SFT 1 + u8 timestamp_lo; + __le16 timestamp_hi; + __le32 event_data1; + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_MASK 0xffffUL + #define ASYNC_EVENT_CMPL_EEM_CACHE_FLUSH_DONE_EVENT_DATA1_FID_SFT 0 +}; + /* hwrm_func_reset_input (size:192b/24B) */ struct hwrm_func_reset_input { __le16 req_type; @@ -867,6 +1012,8 @@ struct hwrm_func_qcaps_output { #define FUNC_QCAPS_RESP_FLAGS_ADMIN_PF_SUPPORTED 0x40000UL #define FUNC_QCAPS_RESP_FLAGS_LINK_ADMIN_STATUS_SUPPORTED 0x80000UL #define FUNC_QCAPS_RESP_FLAGS_WCB_PUSH_MODE 0x100000UL + #define FUNC_QCAPS_RESP_FLAGS_DYNAMIC_TX_RING_ALLOC 0x200000UL + #define FUNC_QCAPS_RESP_FLAGS_HOT_RESET_CAPABLE 0x400000UL u8 mac_address[6]; __le16 max_rsscos_ctx; __le16 max_cmpl_rings; @@ -902,7 +1049,7 @@ struct hwrm_func_qcfg_input { u8 unused_0[6]; }; -/* hwrm_func_qcfg_output (size:640b/80B) */ +/* hwrm_func_qcfg_output (size:704b/88B) */ struct hwrm_func_qcfg_output { __le16 error_code; __le16 req_type; @@ -919,6 +1066,7 @@ struct hwrm_func_qcfg_output { #define FUNC_QCFG_RESP_FLAGS_FW_LLDP_AGENT_ENABLED 0x10UL #define FUNC_QCFG_RESP_FLAGS_MULTI_HOST 0x20UL #define FUNC_QCFG_RESP_FLAGS_TRUSTED_VF 0x40UL + #define FUNC_QCFG_RESP_FLAGS_SECURE_MODE_ENABLED 0x80UL u8 mac_address[6]; __le16 pci_id; __le16 alloc_rsscos_ctx; @@ -1000,7 +1148,11 @@ struct hwrm_func_qcfg_output { __le16 alloc_sp_tx_rings; __le16 alloc_stat_ctx; __le16 alloc_msix; - u8 unused_2[5]; + __le16 registered_vfs; + u8 unused_1[3]; + u8 always_1; + __le32 reset_addr_poll; + u8 unused_2[3]; u8 valid; }; @@ -1031,6 +1183,7 @@ struct hwrm_func_cfg_input { #define FUNC_CFG_REQ_FLAGS_VNIC_ASSETS_TEST 0x80000UL #define FUNC_CFG_REQ_FLAGS_L2_CTX_ASSETS_TEST 0x100000UL #define FUNC_CFG_REQ_FLAGS_TRUSTED_VF_ENABLE 0x200000UL + #define FUNC_CFG_REQ_FLAGS_DYNAMIC_TX_RING_ALLOC 0x400000UL __le32 enables; #define FUNC_CFG_REQ_ENABLES_MTU 0x1UL #define FUNC_CFG_REQ_ENABLES_MRU 0x2UL @@ -1235,6 +1388,7 @@ struct hwrm_func_drv_rgtr_input { #define FUNC_DRV_RGTR_REQ_FLAGS_FWD_NONE_MODE 0x2UL #define FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE 0x4UL #define FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE 0x8UL + 
#define FUNC_DRV_RGTR_REQ_FLAGS_HOT_RESET_SUPPORT 0x10UL __le32 enables; #define FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE 0x1UL #define FUNC_DRV_RGTR_REQ_ENABLES_VER 0x2UL @@ -1888,7 +2042,8 @@ struct hwrm_func_drv_if_change_output { __le16 seq_id; __le16 resp_len; __le32 flags; - #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_RESC_CHANGE 0x1UL + #define FUNC_DRV_IF_CHANGE_RESP_FLAGS_HOT_FW_RESET_DONE 0x2UL u8 unused_0[3]; u8 valid; }; @@ -2864,6 +3019,60 @@ struct hwrm_port_phy_i2c_read_output { u8 valid; }; +/* hwrm_port_phy_mdio_write_input (size:320b/40B) */ +struct hwrm_port_phy_mdio_write_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[2]; + __le16 port_id; + u8 phy_addr; + u8 dev_addr; + __le16 reg_addr; + __le16 reg_data; + u8 cl45_mdio; + u8 unused_1[7]; +}; + +/* hwrm_port_phy_mdio_write_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_write_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_port_phy_mdio_read_input (size:256b/32B) */ +struct hwrm_port_phy_mdio_read_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 unused_0[2]; + __le16 port_id; + u8 phy_addr; + u8 dev_addr; + __le16 reg_addr; + u8 cl45_mdio; + u8 unused_1; +}; + +/* hwrm_port_phy_mdio_read_output (size:128b/16B) */ +struct hwrm_port_phy_mdio_read_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le16 reg_data; + u8 unused_0[5]; + u8 valid; +}; + /* hwrm_port_led_cfg_input (size:512b/64B) */ struct hwrm_port_led_cfg_input { __le16 req_type; @@ -4869,6 +5078,10 @@ struct hwrm_ring_grp_free_output { u8 unused_0[7]; u8 valid; }; +#define DEFAULT_FLOW_ID 0xFFFFFFFFUL +#define ROCEV1_FLOW_ID 0xFFFFFFFEUL +#define ROCEV2_FLOW_ID 0xFFFFFFFDUL +#define ROCEV2_CNP_FLOW_ID 0xFFFFFFFCUL /* hwrm_cfa_l2_filter_alloc_input (size:768b/96B) */ struct hwrm_cfa_l2_filter_alloc_input { @@ -4937,20 +5150,21 @@ struct hwrm_cfa_l2_filter_alloc_input { u8 unused_3; __le32 src_id; u8 tunnel_type; - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define 
CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_L2_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_4; __le16 dst_id; __le16 mirror_vnic_id; @@ -5108,20 +5322,21 @@ struct hwrm_cfa_tunnel_filter_alloc_input { u8 l3_addr_type; u8 t_l3_addr_type; u8 tunnel_type; - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 tunnel_flags; #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_OAM_CHECKSUM_EXPLHDR 0x1UL #define CFA_TUNNEL_FILTER_ALLOC_REQ_TUNNEL_FLAGS_TUN_FLAGS_CRITICAL_OPT_S1 0x2UL @@ -5326,20 +5541,21 @@ struct hwrm_cfa_ntuple_filter_alloc_input { __le16 dst_id; __le16 mirror_vnic_id; u8 tunnel_type; - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define 
CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_NTUPLE_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 pri_hint; #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_NO_PREFER 0x0UL #define CFA_NTUPLE_FILTER_ALLOC_REQ_PRI_HINT_ABOVE 0x1UL @@ -5459,20 +5675,21 @@ struct hwrm_cfa_decap_filter_alloc_input { #define CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_MIRROR_VNIC_ID 0x10000UL __be32 tunnel_id; u8 tunnel_type; - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL u8 unused_0; __le16 unused_1; u8 src_macaddr[6]; @@ -5559,20 +5776,23 @@ struct hwrm_cfa_flow_alloc_input { #define 
CFA_FLOW_ALLOC_REQ_FLAGS_PATH_TX 0x40UL #define CFA_FLOW_ALLOC_REQ_FLAGS_PATH_RX 0x80UL #define CFA_FLOW_ALLOC_REQ_FLAGS_MATCH_VXLAN_IP_VNI 0x100UL + #define CFA_FLOW_ALLOC_REQ_FLAGS_VHOST_ID_USE_VLAN 0x200UL __le16 src_fid; __le32 tunnel_handle; __le16 action_flags; - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL - #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD 0x1UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_RECYCLE 0x2UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP 0x4UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_METER 0x8UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL 0x10UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_SRC 0x20UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_DEST 0x40UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_NAT_IPV4_ADDRESS 0x80UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE 0x100UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TTL_DECREMENT 0x200UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL_IP 0x400UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FLOW_AGING_ENABLED 0x800UL + #define CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_PRI_HINT 0x1000UL __le16 dst_fid; __be16 l2_rewrite_vlan_tpid; __be16 l2_rewrite_vlan_tci; @@ -5597,20 +5817,21 @@ struct hwrm_cfa_flow_alloc_input { __be16 l2_rewrite_smac[3]; u8 ip_proto; u8 tunnel_type; - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL - #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NONTUNNEL 0x0UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_NVGRE 0x2UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2GRE 0x3UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPIP 0x4UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_MPLS 0x6UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_STT 0x7UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE 0x8UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL 0xffUL + #define CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_LAST CFA_FLOW_ALLOC_REQ_TUNNEL_TYPE_ANYTUNNEL }; /* hwrm_cfa_flow_alloc_output 
(size:256b/32B) */ @@ -5623,7 +5844,8 @@ struct hwrm_cfa_flow_alloc_output { u8 unused_0[2]; __le32 flow_id; __le64 ext_flow_handle; - u8 unused_1[7]; + __le32 flow_counter_id; + u8 unused_1[3]; u8 valid; }; @@ -5651,6 +5873,46 @@ struct hwrm_cfa_flow_free_output { u8 valid; }; +/* hwrm_cfa_flow_info_input (size:256b/32B) */ +struct hwrm_cfa_flow_info_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le16 flow_handle; + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK 0xfffUL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_SFT 0 + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_CNP_CNT 0x1000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV1_CNT 0x2000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_ROCEV2_CNT 0x4000UL + #define CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX 0x8000UL + u8 unused_0[6]; + __le64 ext_flow_handle; +}; + +/* hwrm_cfa_flow_info_output (size:448b/56B) */ +struct hwrm_cfa_flow_info_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 flags; + u8 profile; + __le16 src_fid; + __le16 dst_fid; + __le16 l2_ctxt_id; + __le64 em_info; + __le64 tcam_info; + __le64 vfp_tcam_info; + __le16 ar_id; + __le16 flow_handle; + __le32 tunnel_handle; + __le16 flow_timer; + u8 unused_0[5]; + u8 valid; +}; + /* hwrm_cfa_flow_stats_input (size:640b/80B) */ struct hwrm_cfa_flow_stats_input { __le16 req_type; @@ -5757,6 +6019,128 @@ struct hwrm_cfa_vfr_free_output { u8 valid; }; +/* hwrm_cfa_eem_qcaps_input (size:192b/24B) */ +struct hwrm_cfa_eem_qcaps_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCAPS_REQ_FLAGS_PATH_RX 0x2UL + #define CFA_EEM_QCAPS_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL + __le32 unused_0; +}; + +/* hwrm_cfa_eem_qcaps_output (size:256b/32B) */ +struct hwrm_cfa_eem_qcaps_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCAPS_RESP_FLAGS_PATH_RX 0x2UL + __le32 unused_0; + __le32 supported; + #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY0_TABLE 0x1UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_KEY1_TABLE 0x2UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_RECORD_TABLE 0x4UL + #define CFA_EEM_QCAPS_RESP_SUPPORTED_EXTERNAL_FLOW_COUNTERS_TABLE 0x8UL + __le32 max_entries_supported; + __le16 key_entry_size; + __le16 record_entry_size; + __le16 efc_entry_size; + u8 unused_1; + u8 valid; +}; + +/* hwrm_cfa_eem_cfg_input (size:320b/40B) */ +struct hwrm_cfa_eem_cfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_CFG_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_CFG_REQ_FLAGS_PATH_RX 0x2UL + #define CFA_EEM_CFG_REQ_FLAGS_PREFERRED_OFFLOAD 0x4UL + __le32 unused_0; + __le32 num_entries; + __le32 unused_1; + __le16 key0_ctx_id; + __le16 key1_ctx_id; + __le16 record_ctx_id; + __le16 efc_ctx_id; +}; + +/* hwrm_cfa_eem_cfg_output (size:128b/16B) */ +struct hwrm_cfa_eem_cfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + +/* hwrm_cfa_eem_qcfg_input (size:192b/24B) */ +struct hwrm_cfa_eem_qcfg_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_QCFG_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCFG_REQ_FLAGS_PATH_RX 0x2UL + __le32 unused_0; +}; + +/* 
hwrm_cfa_eem_qcfg_output (size:128b/16B) */ +struct hwrm_cfa_eem_qcfg_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + __le32 flags; + #define CFA_EEM_QCFG_RESP_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_QCFG_RESP_FLAGS_PATH_RX 0x2UL + #define CFA_EEM_QCFG_RESP_FLAGS_PREFERRED_OFFLOAD 0x4UL + __le32 num_entries; +}; + +/* hwrm_cfa_eem_op_input (size:192b/24B) */ +struct hwrm_cfa_eem_op_input { + __le16 req_type; + __le16 cmpl_ring; + __le16 seq_id; + __le16 target_id; + __le64 resp_addr; + __le32 flags; + #define CFA_EEM_OP_REQ_FLAGS_PATH_TX 0x1UL + #define CFA_EEM_OP_REQ_FLAGS_PATH_RX 0x2UL + __le16 unused_0; + __le16 op; + #define CFA_EEM_OP_REQ_OP_RESERVED 0x0UL + #define CFA_EEM_OP_REQ_OP_EEM_DISABLE 0x1UL + #define CFA_EEM_OP_REQ_OP_EEM_ENABLE 0x2UL + #define CFA_EEM_OP_REQ_OP_EEM_CLEANUP 0x3UL + #define CFA_EEM_OP_REQ_OP_LAST CFA_EEM_OP_REQ_OP_EEM_CLEANUP +}; + +/* hwrm_cfa_eem_op_output (size:128b/16B) */ +struct hwrm_cfa_eem_op_output { + __le16 error_code; + __le16 req_type; + __le16 seq_id; + __le16 resp_len; + u8 unused_0[7]; + u8 valid; +}; + /* hwrm_tunnel_dst_port_query_input (size:192b/24B) */ struct hwrm_tunnel_dst_port_query_input { __le16 req_type; @@ -5765,12 +6149,13 @@ struct hwrm_tunnel_dst_port_query_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_QUERY_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 u8 unused_0[7]; }; @@ -5794,12 +6179,13 @@ struct hwrm_tunnel_dst_port_alloc_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_ALLOC_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 u8 unused_0; __be16 tunnel_dst_port_val; u8 unused_1[4]; @@ -5824,12 +6210,13 @@ struct hwrm_tunnel_dst_port_free_input { __le16 target_id; __le64 resp_addr; u8 tunnel_type; - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL - 
#define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL - #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN 0x1UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE 0x5UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_V4 0x9UL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_IPGRE_V1 0xaUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_L2_ETYPE 0xbUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 0xcUL + #define TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_LAST TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_VXLAN_GPE_V6 u8 unused_0; __le16 tunnel_dst_port_id; u8 unused_1[4]; @@ -6040,7 +6427,9 @@ struct hwrm_fw_reset_input { #define FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE 0x3UL #define FW_RESET_REQ_SELFRST_STATUS_LAST FW_RESET_REQ_SELFRST_STATUS_SELFRSTIMMEDIATE u8 host_idx; - u8 unused_0[5]; + u8 flags; + #define FW_RESET_REQ_FLAGS_RESET_GRACEFUL 0x1UL + u8 unused_0[4]; }; /* hwrm_fw_reset_output (size:128b/16B) */ @@ -6137,6 +6526,7 @@ struct hwrm_struct_hdr { #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL + #define STRUCT_HDR_STRUCT_ID_POWER_BKUP 0x427UL #define STRUCT_HDR_STRUCT_ID_AFM_OPAQUE 0x1UL #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL #define STRUCT_HDR_STRUCT_ID_RSS_V2 0x64UL diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c index 3962f6fd543c..d80f5c981d90 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c @@ -448,16 +448,22 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) u16 vf_stat_ctx, vf_vnics, vf_ring_grps; struct bnxt_pf_info *pf = &bp->pf; int i, rc = 0, min = 1; + u16 vf_msix = 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_VF_RESOURCE_CFG, -1, -1); - vf_cp_rings = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; - vf_stat_ctx = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; + if (bp->flags & BNXT_FLAG_CHIP_P5) { + vf_msix = hw_resc->max_nqs - bnxt_nq_rings_in_use(bp); + vf_ring_grps = 0; + } else { + vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; + } + vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp); + vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp); if (bp->flags & BNXT_FLAG_AGG_RINGS) vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings * 2; else vf_rx_rings = hw_resc->max_rx_rings - bp->rx_nr_rings; - vf_ring_grps = hw_resc->max_hw_ring_grps - bp->rx_nr_rings; vf_tx_rings = hw_resc->max_tx_rings - bp->tx_nr_rings; vf_vnics = hw_resc->max_vnics - bp->nr_vnics; vf_vnics = min_t(u16, vf_vnics, vf_rx_rings); @@ -476,7 +482,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.min_l2_ctxs = cpu_to_le16(min); req.min_vnics = cpu_to_le16(min); req.min_stat_ctx = cpu_to_le16(min); - req.min_hw_ring_grps = cpu_to_le16(min); + if (!(bp->flags & BNXT_FLAG_CHIP_P5)) + req.min_hw_ring_grps = cpu_to_le16(min); } else { vf_cp_rings /= num_vfs; vf_tx_rings /= num_vfs; @@ -500,6 +507,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) req.max_vnics = cpu_to_le16(vf_vnics); req.max_stat_ctx = cpu_to_le16(vf_stat_ctx); req.max_hw_ring_grps = cpu_to_le16(vf_ring_grps); + if (bp->flags & BNXT_FLAG_CHIP_P5) + req.max_msix = 
cpu_to_le16(vf_msix / num_vfs); mutex_lock(&bp->hwrm_cmd_lock); for (i = 0; i < num_vfs; i++) { @@ -525,6 +534,8 @@ static int bnxt_hwrm_func_vf_resc_cfg(struct bnxt *bp, int num_vfs) hw_resc->max_rsscos_ctxs -= pf->active_vfs; hw_resc->max_stat_ctxs -= le16_to_cpu(req.min_stat_ctx) * n; hw_resc->max_vnics -= le16_to_cpu(req.min_vnics) * n; + if (bp->flags & BNXT_FLAG_CHIP_P5) + hw_resc->max_irqs -= vf_msix * n; rc = pf->active_vfs; } @@ -539,19 +550,16 @@ static int bnxt_hwrm_func_cfg(struct bnxt *bp, int num_vfs) u32 rc = 0, mtu, i; u16 vf_tx_rings, vf_rx_rings, vf_cp_rings, vf_stat_ctx, vf_vnics; struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - u16 vf_ring_grps, max_stat_ctxs; struct hwrm_func_cfg_input req = {0}; struct bnxt_pf_info *pf = &bp->pf; int total_vf_tx_rings = 0; + u16 vf_ring_grps; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); - max_stat_ctxs = hw_resc->max_stat_ctxs; - /* Remaining rings are distributed equally amongs VF's for now */ - vf_cp_rings = (bnxt_get_max_func_cp_rings_for_en(bp) - - bp->cp_nr_rings) / num_vfs; - vf_stat_ctx = (max_stat_ctxs - bp->num_stat_ctxs) / num_vfs; + vf_cp_rings = bnxt_get_avail_cp_rings_for_en(bp) / num_vfs; + vf_stat_ctx = bnxt_get_avail_stat_ctxs_for_en(bp) / num_vfs; if (bp->flags & BNXT_FLAG_AGG_RINGS) vf_rx_rings = (hw_resc->max_rx_rings - bp->rx_nr_rings * 2) / num_vfs; @@ -644,8 +652,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs) */ vfs_supported = *num_vfs; - avail_cp = bnxt_get_max_func_cp_rings_for_en(bp) - bp->cp_nr_rings; - avail_stat = hw_resc->max_stat_ctxs - bp->num_stat_ctxs; + avail_cp = bnxt_get_avail_cp_rings_for_en(bp); + avail_stat = bnxt_get_avail_stat_ctxs_for_en(bp); avail_cp = min_t(int, avail_cp, avail_stat); while (vfs_supported) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c index 749f63beddd8..c683b5e96b1d 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c @@ -337,18 +337,21 @@ static int bnxt_tc_parse_flow(struct bnxt *bp, return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts); } -static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle) +static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node) { struct hwrm_cfa_flow_free_input req = { 0 }; int rc; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1); - req.flow_handle = flow_handle; + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + req.ext_flow_handle = flow_node->ext_flow_handle; + else + req.flow_handle = flow_node->flow_handle; rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (rc) - netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d", - __func__, flow_handle, rc); + netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); if (rc) rc = -EIO; @@ -418,13 +421,14 @@ static bool bits_set(void *key, int len) static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow, __le16 ref_flow_handle, - __le32 tunnel_handle, __le16 *flow_handle) + __le32 tunnel_handle, + struct bnxt_tc_flow_node *flow_node) { - struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr; struct bnxt_tc_actions *actions = &flow->actions; struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask; struct bnxt_tc_l3_key *l3_key = &flow->l3_key; struct hwrm_cfa_flow_alloc_input req = { 0 }; + struct hwrm_cfa_flow_alloc_output *resp; u16 flow_flags = 0, action_flags = 0; int rc; @@ -527,8 +531,23 @@ static int bnxt_hwrm_cfa_flow_alloc(struct bnxt 
*bp, struct bnxt_tc_flow *flow, mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) - *flow_handle = resp->flow_handle; + if (!rc) { + resp = bnxt_get_hwrm_resp_addr(bp, &req); + /* CFA_FLOW_ALLOC response interpretation: + * fw with fw with + * 16-bit 64-bit + * flow handle flow handle + * =========== =========== + * flow_handle flow handle flow context id + * ext_flow_handle INVALID flow handle + * flow_id INVALID flow counter id + */ + flow_node->flow_handle = resp->flow_handle; + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { + flow_node->ext_flow_handle = resp->ext_flow_handle; + flow_node->flow_id = resp->flow_id; + } + } mutex_unlock(&bp->hwrm_cmd_lock); if (rc == HWRM_ERR_CODE_RESOURCE_ALLOC_ERROR) @@ -544,9 +563,8 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, __le32 ref_decap_handle, __le32 *decap_filter_handle) { - struct hwrm_cfa_decap_filter_alloc_output *resp = - bp->hwrm_cmd_resp_addr; struct hwrm_cfa_decap_filter_alloc_input req = { 0 }; + struct hwrm_cfa_decap_filter_alloc_output *resp; struct ip_tunnel_key *tun_key = &flow->tun_key; u32 enables = 0; int rc; @@ -599,10 +617,12 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp, mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { + resp = bnxt_get_hwrm_resp_addr(bp, &req); *decap_filter_handle = resp->decap_filter_id; - else + } else { netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + } mutex_unlock(&bp->hwrm_cmd_lock); if (rc) @@ -633,9 +653,8 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, struct bnxt_tc_l2_key *l2_info, __le32 *encap_record_handle) { - struct hwrm_cfa_encap_record_alloc_output *resp = - bp->hwrm_cmd_resp_addr; struct hwrm_cfa_encap_record_alloc_input req = { 0 }; + struct hwrm_cfa_encap_record_alloc_output *resp; struct hwrm_cfa_encap_data_vxlan *encap = (struct hwrm_cfa_encap_data_vxlan *)&req.encap_data; struct hwrm_vxlan_ipv4_hdr *encap_ipv4 = @@ -667,10 +686,12 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp, mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); - if (!rc) + if (!rc) { + resp = bnxt_get_hwrm_resp_addr(bp, &req); *encap_record_handle = resp->encap_record_id; - else + } else { netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc); + } mutex_unlock(&bp->hwrm_cmd_lock); if (rc) @@ -1224,7 +1245,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, int rc; /* send HWRM cmd to free the flow-id */ - bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle); + bnxt_hwrm_cfa_flow_free(bp, flow_node); mutex_lock(&tc_info->lock); @@ -1246,6 +1267,12 @@ static int __bnxt_tc_del_flow(struct bnxt *bp, return 0; } +static void bnxt_tc_set_flow_dir(struct bnxt *bp, struct bnxt_tc_flow *flow, + u16 src_fid) +{ + flow->dir = (bp->pf.fw_fid == src_fid) ? 
BNXT_DIR_RX : BNXT_DIR_TX; +} + static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow, u16 src_fid) { @@ -1293,6 +1320,9 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, bnxt_tc_set_src_fid(bp, flow, src_fid); + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) + bnxt_tc_set_flow_dir(bp, flow, src_fid); + if (!bnxt_tc_can_offload(bp, flow)) { rc = -ENOSPC; goto free_node; @@ -1320,7 +1350,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, /* send HWRM cmd to alloc the flow */ rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle, - tunnel_handle, &new_node->flow_handle); + tunnel_handle, new_node); if (rc) goto put_tunnel; @@ -1336,7 +1366,7 @@ static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid, return 0; hwrm_flow_free: - bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle); + bnxt_hwrm_cfa_flow_free(bp, new_node); put_tunnel: bnxt_tc_put_tunnel_handle(bp, flow, new_node); put_l2: @@ -1397,13 +1427,40 @@ static int bnxt_tc_get_flow_stats(struct bnxt *bp, return 0; } +static void bnxt_fill_cfa_stats_req(struct bnxt *bp, + struct bnxt_tc_flow_node *flow_node, + __le16 *flow_handle, __le32 *flow_id) +{ + u16 handle; + + if (bp->fw_cap & BNXT_FW_CAP_OVS_64BIT_HANDLE) { + *flow_id = flow_node->flow_id; + + /* If flow_id is used to fetch flow stats then: + * 1. lower 12 bits of flow_handle must be set to all 1s. + * 2. 15th bit of flow_handle must specify the flow + * direction (TX/RX). + */ + if (flow_node->flow.dir == BNXT_DIR_RX) + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_DIR_RX | + CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; + else + handle = CFA_FLOW_INFO_REQ_FLOW_HANDLE_MAX_MASK; + + *flow_handle = cpu_to_le16(handle); + } else { + *flow_handle = flow_node->flow_handle; + } +} + static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, struct bnxt_tc_stats_batch stats_batch[]) { - struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr; struct hwrm_cfa_flow_stats_input req = { 0 }; + struct hwrm_cfa_flow_stats_output *resp; __le16 *req_flow_handles = &req.flow_handle_0; + __le32 *req_flow_ids = &req.flow_id_0; int rc, i; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1); @@ -1411,14 +1468,19 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows, for (i = 0; i < num_flows; i++) { struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node; - req_flow_handles[i] = flow_node->flow_handle; + bnxt_fill_cfa_stats_req(bp, flow_node, + &req_flow_handles[i], &req_flow_ids[i]); } mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); if (!rc) { - __le64 *resp_packets = &resp->packet_0; - __le64 *resp_bytes = &resp->byte_0; + __le64 *resp_packets; + __le64 *resp_bytes; + + resp = bnxt_get_hwrm_resp_addr(bp, &req); + resp_packets = &resp->packet_0; + resp_bytes = &resp->byte_0; for (i = 0; i < num_flows; i++) { stats_batch[i].hw_stats.packets = diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h index 97e09a880693..8a0968967bc5 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.h @@ -98,6 +98,9 @@ struct bnxt_tc_flow { /* flow applicable to pkts ingressing on this fid */ u16 src_fid; + u8 dir; +#define BNXT_DIR_RX 1 +#define BNXT_DIR_TX 0 struct bnxt_tc_l2_key l2_key; struct bnxt_tc_l2_key l2_mask; struct bnxt_tc_l3_key l3_key; @@ -170,7 +173,9 @@ struct bnxt_tc_flow_node { struct bnxt_tc_flow flow; + __le64 ext_flow_handle; __le16 flow_handle; + __le32 flow_id; 
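	/* Per the CFA_FLOW_ALLOC response interpretation above: on firmware
	 * with BNXT_FW_CAP_OVS_64BIT_HANDLE, flow_handle carries the flow
	 * context id, ext_flow_handle the real flow handle, and flow_id the
	 * flow counter id; older firmware returns only the 16-bit
	 * flow_handle and leaves the other two fields unused.
	 */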
/* L2 node in l2 hashtable that shares flow's l2 key */ struct bnxt_tc_l2_node *l2_node; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index 0a3097baafde..ea45a9b8179e 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -48,10 +48,8 @@ static int bnxt_register_dev(struct bnxt_en_dev *edev, int ulp_id, max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); if (max_stat_ctxs <= BNXT_MIN_ROCE_STAT_CTXS || - bp->num_stat_ctxs == max_stat_ctxs) + bp->cp_nr_rings == max_stat_ctxs) return -ENOMEM; - bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs - - BNXT_MIN_ROCE_STAT_CTXS); } atomic_set(&ulp->ref_count, 0); @@ -82,14 +80,9 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id) netdev_err(bp->dev, "ulp id %d not registered\n", ulp_id); return -EINVAL; } - if (ulp_id == BNXT_ROCE_ULP) { - unsigned int max_stat_ctxs; + if (ulp_id == BNXT_ROCE_ULP && ulp->msix_requested) + edev->en_ops->bnxt_free_msix(edev, ulp_id); - max_stat_ctxs = bnxt_get_max_func_stat_ctxs(bp); - bnxt_set_max_func_stat_ctxs(bp, max_stat_ctxs + 1); - if (ulp->msix_requested) - edev->en_ops->bnxt_free_msix(edev, ulp_id); - } if (ulp->max_async_event_id) bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0); @@ -218,6 +211,14 @@ int bnxt_get_ulp_msix_base(struct bnxt *bp) return 0; } +int bnxt_get_ulp_stat_ctxs(struct bnxt *bp) +{ + if (bnxt_ulp_registered(bp->edev, BNXT_ROCE_ULP)) + return BNXT_MIN_ROCE_STAT_CTXS; + + return 0; +} + static int bnxt_send_msg(struct bnxt_en_dev *edev, int ulp_id, struct bnxt_fw_msg *fw_msg) { diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h index d9bea37cd211..cd78453d0bf0 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.h @@ -90,6 +90,7 @@ static inline bool bnxt_ulp_registered(struct bnxt_en_dev *edev, int ulp_id) int bnxt_get_ulp_msix_num(struct bnxt *bp); int bnxt_get_ulp_msix_base(struct bnxt *bp); +int bnxt_get_ulp_stat_ctxs(struct bnxt *bp); void bnxt_ulp_stop(struct bnxt *bp); void bnxt_ulp_start(struct bnxt *bp); void bnxt_ulp_sriov_cfg(struct bnxt *bp, int num_vfs); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c index bf6de02be396..0184ef6f05a7 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c @@ -199,7 +199,6 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog) bp->tx_nr_rings_xdp = tx_xdp; bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc + tx_xdp; bp->cp_nr_rings = max_t(int, bp->tx_nr_rings, bp->rx_nr_rings); - bp->num_stat_ctxs = bp->cp_nr_rings; bnxt_set_tpa_flags(bp); bnxt_set_ring_params(bp); diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c index d83233ae4a15..510dfc1c236b 100644 --- a/drivers/net/ethernet/broadcom/cnic.c +++ b/drivers/net/ethernet/broadcom/cnic.c @@ -5731,7 +5731,7 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event, if (realdev) { dev = cnic_from_netdev(realdev); if (dev) { - vid |= VLAN_TAG_PRESENT; + vid |= VLAN_CFI_MASK; /* make non-zero */ cnic_rcv_netevent(dev->cnic_priv, event, vid); cnic_put(dev); } diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c index 2d6f090bf644..983245c0867c 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c +++ 
b/drivers/net/ethernet/broadcom/genet/bcmgenet.c @@ -1169,7 +1169,7 @@ static int bcmgenet_power_down(struct bcmgenet_priv *priv, break; } - return 0; + return ret; } static void bcmgenet_power_up(struct bcmgenet_priv *priv, @@ -3612,36 +3612,6 @@ static int bcmgenet_remove(struct platform_device *pdev) } #ifdef CONFIG_PM_SLEEP -static int bcmgenet_suspend(struct device *d) -{ - struct net_device *dev = dev_get_drvdata(d); - struct bcmgenet_priv *priv = netdev_priv(dev); - int ret = 0; - - if (!netif_running(dev)) - return 0; - - netif_device_detach(dev); - - bcmgenet_netif_stop(dev); - - if (!device_may_wakeup(d)) - phy_suspend(dev->phydev); - - /* Prepare the device for Wake-on-LAN and switch to the slow clock */ - if (device_may_wakeup(d) && priv->wolopts) { - ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); - clk_prepare_enable(priv->clk_wol); - } else if (priv->internal_phy) { - ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); - } - - /* Turn off the clocks */ - clk_disable_unprepare(priv->clk); - - return ret; -} - static int bcmgenet_resume(struct device *d) { struct net_device *dev = dev_get_drvdata(d); @@ -3719,6 +3689,39 @@ out_clk_disable: clk_disable_unprepare(priv->clk); return ret; } + +static int bcmgenet_suspend(struct device *d) +{ + struct net_device *dev = dev_get_drvdata(d); + struct bcmgenet_priv *priv = netdev_priv(dev); + int ret = 0; + + if (!netif_running(dev)) + return 0; + + netif_device_detach(dev); + + bcmgenet_netif_stop(dev); + + if (!device_may_wakeup(d)) + phy_suspend(dev->phydev); + + /* Prepare the device for Wake-on-LAN and switch to the slow clock */ + if (device_may_wakeup(d) && priv->wolopts) { + ret = bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC); + clk_prepare_enable(priv->clk_wol); + } else if (priv->internal_phy) { + ret = bcmgenet_power_down(priv, GENET_POWER_PASSIVE); + } + + /* Turn off the clocks */ + clk_disable_unprepare(priv->clk); + + if (ret) + bcmgenet_resume(d); + + return ret; +} #endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume); diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 2fbd027f0148..57582efa362d 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -186,6 +186,8 @@ void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv, } reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); + if (!(reg & MPD_EN)) + return; /* already powered up so skip the rest */ reg &= ~MPD_EN; bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c index a6cbaca37e94..aceb9b7b55bd 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmmii.c +++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c @@ -226,7 +226,8 @@ int bcmgenet_mii_config(struct net_device *dev, bool init) * capabilities, use that knowledge to also configure the * Reverse MII interface correctly. */ - if (dev->phydev->supported & PHY_1000BT_FEATURES) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + dev->phydev->supported)) port_ctrl = PORT_MODE_EXT_RVMII_50; else port_ctrl = PORT_MODE_EXT_RVMII_25; @@ -317,7 +318,7 @@ int bcmgenet_mii_probe(struct net_device *dev) return ret; } - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); /* The internal PHY has its link interrupts routed to the * Ethernet MAC ISRs. 
On GENETv5 there is a hardware issue diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 432c3b867084..3b1397af81f7 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -66,11 +66,6 @@ #include <uapi/linux/net_tstamp.h> #include <linux/ptp_clock_kernel.h> -#ifdef CONFIG_SPARC -#include <asm/idprom.h> -#include <asm/prom.h> -#endif - #define BAR_0 0 #define BAR_2 2 @@ -2157,7 +2152,8 @@ static void tg3_phy_start(struct tg3 *tp) phydev->speed = tp->link_config.speed; phydev->duplex = tp->link_config.duplex; phydev->autoneg = tp->link_config.autoneg; - phydev->advertising = tp->link_config.advertising; + ethtool_convert_legacy_u32_to_link_mode( + phydev->advertising, tp->link_config.advertising); } phy_start(phydev); @@ -4057,8 +4053,9 @@ static int tg3_power_down_prepare(struct tg3 *tp) do_low_power = false; if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) && !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising) = { 0, }; struct phy_device *phydev; - u32 phyid, advertising; + u32 phyid; phydev = mdiobus_get_phy(tp->mdio_bus, tp->phy_addr); @@ -4067,25 +4064,33 @@ static int tg3_power_down_prepare(struct tg3 *tp) tp->link_config.speed = phydev->speed; tp->link_config.duplex = phydev->duplex; tp->link_config.autoneg = phydev->autoneg; - tp->link_config.advertising = phydev->advertising; - - advertising = ADVERTISED_TP | - ADVERTISED_Pause | - ADVERTISED_Autoneg | - ADVERTISED_10baseT_Half; + ethtool_convert_link_mode_to_legacy_u32( + &tp->link_config.advertising, + phydev->advertising); + + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + advertising); if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) { - if (tg3_flag(tp, WOL_SPEED_100MB)) - advertising |= - ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full | - ADVERTISED_10baseT_Full; - else - advertising |= ADVERTISED_10baseT_Full; + if (tg3_flag(tp, WOL_SPEED_100MB)) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising); + } else { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + advertising); + } } - phydev->advertising = advertising; - + linkmode_copy(phydev->advertising, advertising); phy_start_aneg(phydev); phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask; @@ -6135,10 +6140,16 @@ static int tg3_setup_phy(struct tg3 *tp, bool force_reset) } /* tp->lock must be held */ -static u64 tg3_refclk_read(struct tg3 *tp) +static u64 tg3_refclk_read(struct tg3 *tp, struct ptp_system_timestamp *sts) { - u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB); - return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; + u64 stamp; + + ptp_read_system_prets(sts); + stamp = tr32(TG3_EAV_REF_CLCK_LSB); + ptp_read_system_postts(sts); + stamp |= (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32; + + return stamp; } /* tp->lock must be held */ @@ -6229,13 +6240,14 @@ static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int tg3_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { u64 ns; struct tg3 *tp = container_of(ptp, struct tg3, ptp_info); 
tg3_full_lock(tp, 0); - ns = tg3_refclk_read(tp); + ns = tg3_refclk_read(tp, sts); ns += tp->ptp_adjust; tg3_full_unlock(tp); @@ -6330,7 +6342,7 @@ static const struct ptp_clock_info tg3_ptp_caps = { .pps = 0, .adjfreq = tg3_ptp_adjfreq, .adjtime = tg3_ptp_adjtime, - .gettime64 = tg3_ptp_gettime, + .gettimex64 = tg3_ptp_gettimex, .settime64 = tg3_ptp_settime, .enable = tg3_ptp_enable, }; @@ -16973,32 +16985,6 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent) return err; } -#ifdef CONFIG_SPARC -static int tg3_get_macaddr_sparc(struct tg3 *tp) -{ - struct net_device *dev = tp->dev; - struct pci_dev *pdev = tp->pdev; - struct device_node *dp = pci_device_to_OF_node(pdev); - const unsigned char *addr; - int len; - - addr = of_get_property(dp, "local-mac-address", &len); - if (addr && len == ETH_ALEN) { - memcpy(dev->dev_addr, addr, ETH_ALEN); - return 0; - } - return -ENODEV; -} - -static int tg3_get_default_macaddr_sparc(struct tg3 *tp) -{ - struct net_device *dev = tp->dev; - - memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN); - return 0; -} -#endif - static int tg3_get_device_address(struct tg3 *tp) { struct net_device *dev = tp->dev; @@ -17006,10 +16992,8 @@ static int tg3_get_device_address(struct tg3 *tp) int addr_ok = 0; int err; -#ifdef CONFIG_SPARC - if (!tg3_get_macaddr_sparc(tp)) + if (!eth_platform_get_mac_address(&tp->pdev->dev, dev->dev_addr)) return 0; -#endif if (tg3_flag(tp, IS_SSB_CORE)) { err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]); @@ -17071,13 +17055,8 @@ static int tg3_get_device_address(struct tg3 *tp) } } - if (!is_valid_ether_addr(&dev->dev_addr[0])) { -#ifdef CONFIG_SPARC - if (!tg3_get_default_macaddr_sparc(tp)) - return 0; -#endif + if (!is_valid_ether_addr(&dev->dev_addr[0])) return -EINVAL; - } return 0; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 4c816e5a841f..b126926ef7f5 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4091,7 +4091,7 @@ static int macb_probe(struct platform_device *pdev) if (mac) { ether_addr_copy(bp->dev->dev_addr, mac); } else { - err = of_get_nvmem_mac_address(np, bp->dev->dev_addr); + err = nvmem_get_mac_address(&pdev->dev, bp->dev->dev_addr); if (err) { if (err == -EPROBE_DEFER) goto err_out_free_netdev; diff --git a/drivers/net/ethernet/cavium/common/cavium_ptp.c b/drivers/net/ethernet/cavium/common/cavium_ptp.c index 6aeb1045c302..73632b843749 100644 --- a/drivers/net/ethernet/cavium/common/cavium_ptp.c +++ b/drivers/net/ethernet/cavium/common/cavium_ptp.c @@ -277,10 +277,6 @@ static int cavium_ptp_probe(struct pci_dev *pdev, writeq(clock_comp, clock->reg_base + PTP_CLOCK_COMP); clock->ptp_clock = ptp_clock_register(&clock->ptp_info, dev); - if (!clock->ptp_clock) { - err = -ENODEV; - goto error_stop; - } if (IS_ERR(clock->ptp_clock)) { err = PTR_ERR(clock->ptp_clock); goto error_stop; diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c index 4b3aecf98f2a..5359c1021f42 100644 --- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c +++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c @@ -1080,8 +1080,11 @@ static int octeon_mgmt_open(struct net_device *netdev) /* Set the mode of the interface, RGMII/MII. 
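/* A minimal sketch of the .gettime64 -> .gettimex64 conversion performed
 * above, for a hypothetical foo driver: the system clock is sampled
 * immediately before and after the device-clock register read, so user
 * space (e.g. phc2sys) can bound the cross-timestamping error:
 */
static int foo_ptp_gettimex(struct ptp_clock_info *ptp,
			    struct timespec64 *ts,
			    struct ptp_system_timestamp *sts)
{
	struct foo *foo = container_of(ptp, struct foo, ptp_info);
	u64 ns;

	ptp_read_system_prets(sts);		/* system time just before */
	ns = foo_read_device_clock(foo);	/* hypothetical register read */
	ptp_read_system_postts(sts);		/* system time just after */

	*ts = ns_to_timespec64(ns);
	return 0;
}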
*/ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) { union cvmx_agl_prtx_ctl agl_prtx_ctl; - int rgmii_mode = (netdev->phydev->supported & - (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; + int rgmii_mode = + (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + netdev->phydev->supported) | + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + netdev->phydev->supported)) != 0; agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; diff --git a/drivers/net/ethernet/chelsio/Kconfig b/drivers/net/ethernet/chelsio/Kconfig index e2cdfa75673f..e8001e974411 100644 --- a/drivers/net/ethernet/chelsio/Kconfig +++ b/drivers/net/ethernet/chelsio/Kconfig @@ -24,7 +24,8 @@ config CHELSIO_T1 ---help--- This driver supports Chelsio gigabit and 10-gigabit Ethernet cards. More information about adapter features and - performance tuning is in <file:Documentation/networking/cxgb.txt>. + performance tuning is in + <file:Documentation/networking/device_drivers/chelsio/cxgb.txt>. For general information about Chelsio and our products, visit our website at <http://www.chelsio.com>. diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h index b16f4b3ef4c5..2d1ca920601e 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h @@ -404,6 +404,7 @@ struct adapter_params { bool fr_nsmr_tpte_wr_support; /* FW support for FR_NSMR_TPTE_WR */ u8 fw_caps_support; /* 32-bit Port Capabilities */ bool filter2_wr_support; /* FW support for FILTER2_WR */ + unsigned int viid_smt_extn_support:1; /* FW returns vin and smt index */ /* MPS Buffer Group Map[per Port]. Bit i is set if buffer group i is * used by the Port @@ -592,6 +593,13 @@ struct port_info { bool ptp_enable; struct sched_table *sched_tbl; u32 eth_flags; + + /* viid and smt fields either returned by fw + * or decoded by parsing viid by driver. 
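/* For reference: a cxgb4 VIID packs the PF number, a VI-valid bit and the
 * VI number into one identifier, so on firmware without the new extension
 * the fields added above can still be recovered with the existing field
 * extractors, exactly as done later in this patch:
 *
 *	pi->vivld = FW_VIID_VIVLD_G(pi->viid);
 *	pi->vin   = FW_VIID_VIN_G(pi->viid);
 */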
+ */ + u8 vin; + u8 vivld; + u8 smt_idx; }; struct dentry; @@ -1757,7 +1765,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int nexact, unsigned int rcaps, unsigned int wxcaps); int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, - unsigned int *rss_size); + unsigned int *rss_size, u8 *vivld, u8 *vin); int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf, unsigned int vf, unsigned int viid); @@ -1783,7 +1791,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, unsigned int viid, unsigned int naddr, const u8 **addr, bool sleep_ok); int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, - int idx, const u8 *addr, bool persist, bool add_smt); + int idx, const u8 *addr, bool persist, u8 *smt_idx); int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid, bool ucast, u64 vec, bool sleep_ok); int t4_enable_vi_params(struct adapter *adap, unsigned int mbox, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c index cab492ec8f59..b0ff9fa183f4 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c @@ -378,19 +378,7 @@ static int cim_qcfg_show(struct seq_file *seq, void *v) QUEREMFLITS_G(p[2]) * 16); return 0; } - -static int cim_qcfg_open(struct inode *inode, struct file *file) -{ - return single_open(file, cim_qcfg_show, inode->i_private); -} - -static const struct file_operations cim_qcfg_fops = { - .owner = THIS_MODULE, - .open = cim_qcfg_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(cim_qcfg); static int cimq_show(struct seq_file *seq, void *v, int idx) { @@ -860,8 +848,7 @@ static int tx_rate_show(struct seq_file *seq, void *v) } return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(tx_rate); +DEFINE_SHOW_ATTRIBUTE(tx_rate); static int cctrl_tbl_show(struct seq_file *seq, void *v) { @@ -893,8 +880,7 @@ static int cctrl_tbl_show(struct seq_file *seq, void *v) kfree(incr); return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(cctrl_tbl); +DEFINE_SHOW_ATTRIBUTE(cctrl_tbl); /* Format a value in a unit that differs from the value's native unit by the * given factor. @@ -955,8 +941,7 @@ static int clk_show(struct seq_file *seq, void *v) return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(clk); +DEFINE_SHOW_ATTRIBUTE(clk); /* Firmware Device Log dump. */ static const char * const devlog_level_strings[] = { @@ -1990,22 +1975,10 @@ static int sensors_show(struct seq_file *seq, void *v) return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(sensors); +DEFINE_SHOW_ATTRIBUTE(sensors); #if IS_ENABLED(CONFIG_IPV6) -static int clip_tbl_open(struct inode *inode, struct file *file) -{ - return single_open(file, clip_tbl_show, inode->i_private); -} - -static const struct file_operations clip_tbl_debugfs_fops = { - .owner = THIS_MODULE, - .open = clip_tbl_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release -}; +DEFINE_SHOW_ATTRIBUTE(clip_tbl); #endif /*RSS Table. @@ -2208,8 +2181,7 @@ static int rss_config_show(struct seq_file *seq, void *v) return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(rss_config); +DEFINE_SHOW_ATTRIBUTE(rss_config); /* RSS Secret Key. 
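/* The DEFINE_SHOW_ATTRIBUTE() conversions in this file use the
 * <linux/seq_file.h> helper, which generates exactly the single_open()
 * boilerplate being deleted. Minimal usage, with a hypothetical node:
 */
static int foo_show(struct seq_file *seq, void *v)
{
	seq_puts(seq, "example\n");
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(foo);	/* emits foo_open() and foo_fops */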
*/ @@ -2628,19 +2600,7 @@ static int resources_show(struct seq_file *seq, void *v) return 0; } - -static int resources_open(struct inode *inode, struct file *file) -{ - return single_open(file, resources_show, inode->i_private); -} - -static const struct file_operations resources_debugfs_fops = { - .owner = THIS_MODULE, - .open = resources_open, - .read = seq_read, - .llseek = seq_lseek, - .release = seq_release, -}; +DEFINE_SHOW_ATTRIBUTE(resources); /** * ethqset2pinfo - return port_info of an Ethernet Queue Set @@ -3233,8 +3193,7 @@ static int tid_info_show(struct seq_file *seq, void *v) t4_read_reg(adap, LE_DB_ACT_CNT_IPV6_A)); return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(tid_info); +DEFINE_SHOW_ATTRIBUTE(tid_info); static void add_debugfs_mem(struct adapter *adap, const char *name, unsigned int idx, unsigned int size_mb) @@ -3364,21 +3323,9 @@ static int meminfo_show(struct seq_file *seq, void *v) return 0; } +DEFINE_SHOW_ATTRIBUTE(meminfo); -static int meminfo_open(struct inode *inode, struct file *file) -{ - return single_open(file, meminfo_show, inode->i_private); -} - -static const struct file_operations meminfo_fops = { - .owner = THIS_MODULE, - .open = meminfo_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int chcr_show(struct seq_file *seq, void *v) +static int chcr_stats_show(struct seq_file *seq, void *v) { struct adapter *adap = seq->private; @@ -3399,20 +3346,7 @@ static int chcr_show(struct seq_file *seq, void *v) atomic_read(&adap->chcr_stats.ipsec_cnt)); return 0; } - - -static int chcr_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, chcr_show, inode->i_private); -} - -static const struct file_operations chcr_stats_debugfs_fops = { - .owner = THIS_MODULE, - .open = chcr_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(chcr_stats); #define PRINT_ADAP_STATS(string, value) \ seq_printf(seq, "%-25s %-20llu\n", (string), \ @@ -3573,8 +3507,7 @@ static int tp_stats_show(struct seq_file *seq, void *v) return 0; } - -DEFINE_SIMPLE_DEBUGFS_FILE(tp_stats); +DEFINE_SHOW_ATTRIBUTE(tp_stats); /* Add an array of Debug FS files. 
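/* Note the chcr_show() -> chcr_stats_show() rename above: the macro
 * derives every symbol from its argument by token pasting, roughly
 *
 *	#define DEFINE_SHOW_ATTRIBUTE(name)				\
 *	static int name##_open(struct inode *inode, struct file *file)	\
 *	{								\
 *		return single_open(file, name##_show, inode->i_private);\
 *	}								\
 *	...plus a name##_fops wired to seq_read/seq_lseek/single_release
 *
 * so the show routine must already be called <name>_show, and the file
 * tables below switch to the generated <name>_fops symbols.
 */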
*/ @@ -3603,7 +3536,7 @@ int t4_setup_debugfs(struct adapter *adap) { "cim_pif_la", &cim_pif_la_fops, 0400, 0 }, { "cim_ma_la", &cim_ma_la_fops, 0400, 0 }, { "cim_qcfg", &cim_qcfg_fops, 0400, 0 }, - { "clk", &clk_debugfs_fops, 0400, 0 }, + { "clk", &clk_fops, 0400, 0 }, { "devlog", &devlog_fops, 0400, 0 }, { "mboxlog", &mboxlog_fops, 0400, 0 }, { "mbox0", &mbox_debugfs_fops, 0600, 0 }, @@ -3621,11 +3554,11 @@ int t4_setup_debugfs(struct adapter *adap) { "l2t", &t4_l2t_fops, 0400, 0}, { "mps_tcam", &mps_tcam_debugfs_fops, 0400, 0 }, { "rss", &rss_debugfs_fops, 0400, 0 }, - { "rss_config", &rss_config_debugfs_fops, 0400, 0 }, + { "rss_config", &rss_config_fops, 0400, 0 }, { "rss_key", &rss_key_debugfs_fops, 0400, 0 }, { "rss_pf_config", &rss_pf_config_debugfs_fops, 0400, 0 }, { "rss_vf_config", &rss_vf_config_debugfs_fops, 0400, 0 }, - { "resources", &resources_debugfs_fops, 0400, 0 }, + { "resources", &resources_fops, 0400, 0 }, #ifdef CONFIG_CHELSIO_T4_DCB { "dcb_info", &dcb_info_debugfs_fops, 0400, 0 }, #endif @@ -3644,18 +3577,18 @@ int t4_setup_debugfs(struct adapter *adap) { "obq_ncsi", &cim_obq_fops, 0400, 5 }, { "tp_la", &tp_la_fops, 0400, 0 }, { "ulprx_la", &ulprx_la_fops, 0400, 0 }, - { "sensors", &sensors_debugfs_fops, 0400, 0 }, + { "sensors", &sensors_fops, 0400, 0 }, { "pm_stats", &pm_stats_debugfs_fops, 0400, 0 }, - { "tx_rate", &tx_rate_debugfs_fops, 0400, 0 }, - { "cctrl", &cctrl_tbl_debugfs_fops, 0400, 0 }, + { "tx_rate", &tx_rate_fops, 0400, 0 }, + { "cctrl", &cctrl_tbl_fops, 0400, 0 }, #if IS_ENABLED(CONFIG_IPV6) - { "clip_tbl", &clip_tbl_debugfs_fops, 0400, 0 }, + { "clip_tbl", &clip_tbl_fops, 0400, 0 }, #endif - { "tids", &tid_info_debugfs_fops, 0400, 0}, + { "tids", &tid_info_fops, 0400, 0}, { "blocked_fl", &blocked_fl_fops, 0600, 0 }, { "meminfo", &meminfo_fops, 0400, 0 }, - { "crypto", &chcr_stats_debugfs_fops, 0400, 0 }, - { "tp_stats", &tp_stats_debugfs_fops, 0400, 0 }, + { "crypto", &chcr_stats_fops, 0400, 0 }, + { "tp_stats", &tp_stats_fops, 0400, 0 }, }; /* Debug FS nodes common to all T5 and later adapters. 
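/* For context, t4_setup_debugfs() walks this table and registers each
 * node; only the fops symbol names change here, not the nodes themselves.
 * A simplified sketch of the registration loop (the real helper also
 * threads per-entry private data through):
 */
for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
	debugfs_create_file(debugfs_files[i].name, debugfs_files[i].mode,
			    adap->debugfs_root, adap, debugfs_files[i].ops);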
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h index 23f43a0f8950..ba95e13d52da 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.h @@ -37,19 +37,6 @@ #include <linux/export.h> -#define DEFINE_SIMPLE_DEBUGFS_FILE(name) \ -static int name##_open(struct inode *inode, struct file *file) \ -{ \ - return single_open(file, name##_show, inode->i_private); \ -} \ -static const struct file_operations name##_debugfs_fops = { \ - .owner = THIS_MODULE, \ - .open = name##_open, \ - .read = seq_read, \ - .llseek = seq_lseek, \ - .release = single_release \ -} - struct t4_debugfs_entry { const char *name; const struct file_operations *ops; diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c index d49db46254cd..6ba9099ca7fe 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c @@ -453,7 +453,7 @@ static int link_start(struct net_device *dev) if (ret == 0) { ret = t4_change_mac(pi->adapter, mb, pi->viid, pi->xact_addr_filt, dev->dev_addr, true, - true); + &pi->smt_idx); if (ret >= 0) { pi->xact_addr_filt = ret; ret = 0; @@ -1585,28 +1585,6 @@ unsigned int cxgb4_best_aligned_mtu(const unsigned short *mtus, EXPORT_SYMBOL(cxgb4_best_aligned_mtu); /** - * cxgb4_tp_smt_idx - Get the Source Mac Table index for this VI - * @chip: chip type - * @viid: VI id of the given port - * - * Return the SMT index for this VI. - */ -unsigned int cxgb4_tp_smt_idx(enum chip_type chip, unsigned int viid) -{ - /* In T4/T5, SMT contains 256 SMAC entries organized in - * 128 rows of 2 entries each. - * In T6, SMT contains 256 SMAC entries in 256 rows. - * TODO: The below code needs to be updated when we add support - * for 256 VFs. - */ - if (CHELSIO_CHIP_VERSION(chip) <= CHELSIO_T5) - return ((viid & 0x7f) << 1); - else - return (viid & 0x7f); -} -EXPORT_SYMBOL(cxgb4_tp_smt_idx); - -/** * cxgb4_port_chan - get the HW channel of a port * @dev: the net device for the port * @@ -2280,8 +2258,6 @@ static int cxgb_up(struct adapter *adap) #if IS_ENABLED(CONFIG_IPV6) update_clip(adap); #endif - /* Initialize hash mac addr list*/ - INIT_LIST_HEAD(&adap->mac_hlist); return err; irq_err: @@ -2303,6 +2279,7 @@ static void cxgb_down(struct adapter *adapter) t4_sge_stop(adapter); t4_free_sge_resources(adapter); + adapter->flags &= ~FULL_INIT_DONE; } @@ -2669,7 +2646,7 @@ static void cxgb4_mgmt_fill_vf_station_mac_addr(struct adapter *adap) for (vf = 0, nvfs = pci_sriov_get_totalvfs(adap->pdev); vf < nvfs; vf++) { - macaddr[5] = adap->pf * 16 + vf; + macaddr[5] = adap->pf * nvfs + vf; ether_addr_copy(adap->vfinfo[vf].vf_mac_addr, macaddr); } } @@ -2863,7 +2840,8 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p) return -EADDRNOTAVAIL; ret = t4_change_mac(pi->adapter, pi->adapter->pf, pi->viid, - pi->xact_addr_filt, addr->sa_data, true, true); + pi->xact_addr_filt, addr->sa_data, true, + &pi->smt_idx); if (ret < 0) return ret; @@ -4467,6 +4445,15 @@ static int adap_init0(struct adapter *adap) adap->params.filter2_wr_support = (ret == 0 && val[0] != 0); } + /* Check if FW supports returning vin and smt index. + * If this is not supported, driver will interpret + * these values from viid. 
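/* A common cxgb4 idiom, used in the hunk just below and already visible in
 * the filter2_wr_support probe above it: optional firmware features are
 * probed with t4_query_params(), and any error simply means "feature not
 * supported", never a fatal failure. Generic shape, with a hypothetical
 * SOME_FEATURE parameter:
 *
 *	params[0] = FW_PARAM_DEV(SOME_FEATURE);
 *	ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1, params, val);
 *	adap->params.some_feature_support = (ret == 0 && val[0] != 0);
 */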
+ */ + params[0] = FW_PARAM_DEV(OPAQUE_VIID_SMT_EXTN); + ret = t4_query_params(adap, adap->mbox, adap->pf, 0, + 1, params, val); + adap->params.viid_smt_extn_support = (ret == 0 && val[0] != 0); + /* * Get device capabilities so we can determine what resources we need * to manage. @@ -4777,14 +4764,26 @@ static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; for_each_port(adap, i) { - struct port_info *p = adap2pinfo(adap, i); + struct port_info *pi = adap2pinfo(adap, i); + u8 vivld = 0, vin = 0; - ret = t4_alloc_vi(adap, adap->mbox, p->tx_chan, adap->pf, 0, 1, - NULL, NULL); + ret = t4_alloc_vi(adap, adap->mbox, pi->tx_chan, adap->pf, 0, 1, + NULL, NULL, &vivld, &vin); if (ret < 0) return PCI_ERS_RESULT_DISCONNECT; - p->viid = ret; - p->xact_addr_filt = -1; + pi->viid = ret; + pi->xact_addr_filt = -1; + /* If fw supports returning the VIN as part of FW_VI_CMD, + * save the returned values. + */ + if (adap->params.viid_smt_extn_support) { + pi->vivld = vivld; + pi->vin = vin; + } else { + /* Retrieve the values from VIID */ + pi->vivld = FW_VIID_VIVLD_G(pi->viid); + pi->vin = FW_VIID_VIN_G(pi->viid); + } } t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd, @@ -5621,6 +5620,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent) (is_t5(adapter->params.chip) ? STATMODE_V(0) : T6_STATMODE_V(0))); + /* Initialize hash mac addr list */ + INIT_LIST_HEAD(&adapter->mac_hlist); + for_each_port(adapter, i) { netdev = alloc_etherdev_mq(sizeof(struct port_info), MAX_ETH_QSETS); @@ -5899,6 +5901,7 @@ fw_attach_fail: static void remove_one(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); + struct hash_mac_addr *entry, *tmp; if (!adapter) { pci_release_regions(pdev); @@ -5948,6 +5951,12 @@ static void remove_one(struct pci_dev *pdev) if (adapter->num_uld || adapter->num_ofld_uld) t4_uld_mem_free(adapter); free_some_resources(adapter); + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, + list) { + list_del(&entry->list); + kfree(entry); + } + #if IS_ENABLED(CONFIG_IPV6) t4_cleanup_clip_tbl(adapter); #endif diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c index 99022c0898b5..4852febbfec3 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c +++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c @@ -495,14 +495,11 @@ u64 cxgb4_select_ntuple(struct net_device *dev, ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift; if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) { - u32 viid = cxgb4_port_viid(dev); - u32 vf = FW_VIID_VIN_G(viid); - u32 pf = FW_VIID_PFN_G(viid); - u32 vld = FW_VIID_VIVLD_G(viid); - - ntuple |= (u64)(FT_VNID_ID_VF_V(vf) | - FT_VNID_ID_PF_V(pf) | - FT_VNID_ID_VLD_V(vld)) << tp->vnic_shift; + struct port_info *pi = (struct port_info *)netdev_priv(dev); + + ntuple |= (u64)(FT_VNID_ID_VF_V(pi->vin) | + FT_VNID_ID_PF_V(adap->pf) | + FT_VNID_ID_VLD_V(pi->vivld)) << tp->vnic_shift; } return ntuple; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c index cb523949c812..e8c34292a0ec 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c @@ -5880,7 +5880,6 @@ int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp, { int i, ofst = idx * 4; u32 data_reg, mask_reg, cfg; - u32 multitrc = TRCMULTIFILTER_F; if (!enable) { t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0); @@ -5900,7 +5899,6 @@ int t4_set_trace_filter(struct adapter 
*adap, const struct trace_params *tp, * maximum packet capture size of 9600 bytes is recommended. * Also in this mode, only trace0 can be enabled and running. */ - multitrc = 0; if (tp->snap_len > 9600 || idx) return -EINVAL; } @@ -7141,21 +7139,10 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size, unsigned int cache_line_size) { unsigned int page_shift = fls(page_size) - 1; - unsigned int sge_hps = page_shift - 10; unsigned int stat_len = cache_line_size > 64 ? 128 : 64; unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size; unsigned int fl_align_log = fls(fl_align) - 1; - t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A, - HOSTPAGESIZEPF0_V(sge_hps) | - HOSTPAGESIZEPF1_V(sge_hps) | - HOSTPAGESIZEPF2_V(sge_hps) | - HOSTPAGESIZEPF3_V(sge_hps) | - HOSTPAGESIZEPF4_V(sge_hps) | - HOSTPAGESIZEPF5_V(sge_hps) | - HOSTPAGESIZEPF6_V(sge_hps) | - HOSTPAGESIZEPF7_V(sge_hps)); - if (is_t4(adap->params.chip)) { t4_set_reg_field(adap, SGE_CONTROL_A, INGPADBOUNDARY_V(INGPADBOUNDARY_M) | @@ -7488,7 +7475,7 @@ int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf, */ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac, - unsigned int *rss_size) + unsigned int *rss_size, u8 *vivld, u8 *vin) { int ret; struct fw_vi_cmd c; @@ -7523,6 +7510,13 @@ int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port, } if (rss_size) *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd)); + + if (vivld) + *vivld = FW_VI_CMD_VFVLD_G(be32_to_cpu(c.alloc_to_len16)); + + if (vin) + *vin = FW_VI_CMD_VIN_G(be32_to_cpu(c.alloc_to_len16)); + return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid)); } @@ -7980,7 +7974,7 @@ int t4_free_mac_filt(struct adapter *adap, unsigned int mbox, * MAC value. */ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, - int idx, const u8 *addr, bool persist, bool add_smt) + int idx, const u8 *addr, bool persist, u8 *smt_idx) { int ret, mode; struct fw_vi_mac_cmd c; @@ -7989,7 +7983,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, if (idx < 0) /* new allocation */ idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC; - mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; + mode = smt_idx ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY; memset(&c, 0, sizeof(c)); c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) | @@ -8006,6 +8000,23 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid, ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx)); if (ret >= max_mac_addr) ret = -ENOMEM; + if (smt_idx) { + if (adap->params.viid_smt_extn_support) { + *smt_idx = FW_VI_MAC_CMD_SMTID_G + (be32_to_cpu(c.op_to_viid)); + } else { + /* In T4/T5, SMT contains 256 SMAC entries + * organized in 128 rows of 2 entries each. + * In T6, SMT contains 256 SMAC entries in + * 256 rows. 
+ */ + if (CHELSIO_CHIP_VERSION(adap->params.chip) <= + CHELSIO_T5) + *smt_idx = (viid & FW_VIID_VIN_M) << 1; + else + *smt_idx = (viid & FW_VIID_VIN_M); + } + } } return ret; } @@ -8593,7 +8604,7 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, { unsigned int fw_caps = pi->adapter->params.fw_caps_support; struct fw_port_cmd port_cmd; - unsigned int action, link_ok, speed, mtu; + unsigned int action, link_ok, mtu; fw_port_cap32_t linkattr; int ret; @@ -8627,7 +8638,6 @@ int t4_get_link_params(struct port_info *pi, unsigned int *link_okp, mtu = FW_PORT_CMD_MTU32_G( be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32)); } - speed = fwcap_to_speed(linkattr); *link_okp = link_ok; *speedp = fwcap_to_speed(linkattr); @@ -9374,6 +9384,7 @@ int t4_init_portinfo(struct port_info *pi, int mbox, enum fw_port_type port_type; int mdio_addr; fw_port_cap32_t pcaps, acaps; + u8 vivld = 0, vin = 0; int ret; /* If we haven't yet determined whether we're talking to Firmware @@ -9428,7 +9439,8 @@ int t4_init_portinfo(struct port_info *pi, int mbox, acaps = be32_to_cpu(cmd.u.info32.acaps32); } - ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size); + ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size, + &vivld, &vin); if (ret < 0) return ret; @@ -9437,6 +9449,18 @@ int t4_init_portinfo(struct port_info *pi, int mbox, pi->lport = port; pi->rss_size = rss_size; + /* If fw supports returning the VIN as part of FW_VI_CMD, + * save the returned values. + */ + if (adapter->params.viid_smt_extn_support) { + pi->vivld = vivld; + pi->vin = vin; + } else { + /* Retrieve the values from VIID */ + pi->vivld = FW_VIID_VIVLD_G(pi->viid); + pi->vin = FW_VIID_VIN_G(pi->viid); + } + pi->port_type = port_type; pi->mdio_addr = mdio_addr; pi->mod_type = FW_PORT_MOD_TYPE_NA; diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h index 60df66f4d21c..bf7325f6d553 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h @@ -217,6 +217,7 @@ CH_PCI_DEVICE_ID_TABLE_DEFINE_BEGIN CH_PCI_ID_TABLE_FENTRY(0x6087), /* Custom T6225-CR */ CH_PCI_ID_TABLE_FENTRY(0x6088), /* Custom T62100-CR */ CH_PCI_ID_TABLE_FENTRY(0x6089), /* Custom T62100-KR */ + CH_PCI_ID_TABLE_FENTRY(0x608a), /* Custom T62100-CR */ CH_PCI_DEVICE_ID_TABLE_DEFINE_END; #endif /* __T4_PCI_ID_TBL_H__ */ diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h index 57584ab32043..1d9b3e1e5f94 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h +++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h @@ -1253,6 +1253,7 @@ enum fw_params_param_dev { FW_PARAMS_PARAM_DEV_HMA_SIZE = 0x20, FW_PARAMS_PARAM_DEV_RDMA_WRITE_WITH_IMM = 0x21, FW_PARAMS_PARAM_DEV_RI_WRITE_CMPL_WR = 0x24, + FW_PARAMS_PARAM_DEV_OPAQUE_VIID_SMT_EXTN = 0x27, }; /* @@ -2109,6 +2110,19 @@ struct fw_vi_cmd { #define FW_VI_CMD_FREE_V(x) ((x) << FW_VI_CMD_FREE_S) #define FW_VI_CMD_FREE_F FW_VI_CMD_FREE_V(1U) +#define FW_VI_CMD_VFVLD_S 24 +#define FW_VI_CMD_VFVLD_M 0x1 +#define FW_VI_CMD_VFVLD_V(x) ((x) << FW_VI_CMD_VFVLD_S) +#define FW_VI_CMD_VFVLD_G(x) \ + (((x) >> FW_VI_CMD_VFVLD_S) & FW_VI_CMD_VFVLD_M) +#define FW_VI_CMD_VFVLD_F FW_VI_CMD_VFVLD_V(1U) + +#define FW_VI_CMD_VIN_S 16 +#define FW_VI_CMD_VIN_M 0xff +#define FW_VI_CMD_VIN_V(x) ((x) << FW_VI_CMD_VIN_S) +#define FW_VI_CMD_VIN_G(x) \ + (((x) >> FW_VI_CMD_VIN_S) & FW_VI_CMD_VIN_M) + #define FW_VI_CMD_VIID_S 0 #define 
FW_VI_CMD_VIID_M 0xfff #define FW_VI_CMD_VIID_V(x) ((x) << FW_VI_CMD_VIID_S) @@ -2182,6 +2196,12 @@ struct fw_vi_mac_cmd { } u; }; +#define FW_VI_MAC_CMD_SMTID_S 12 +#define FW_VI_MAC_CMD_SMTID_M 0xff +#define FW_VI_MAC_CMD_SMTID_V(x) ((x) << FW_VI_MAC_CMD_SMTID_S) +#define FW_VI_MAC_CMD_SMTID_G(x) \ + (((x) >> FW_VI_MAC_CMD_SMTID_S) & FW_VI_MAC_CMD_SMTID_M) + #define FW_VI_MAC_CMD_VIID_S 0 #define FW_VI_MAC_CMD_VIID_V(x) ((x) << FW_VI_MAC_CMD_VIID_S) diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index ff84791a0ff8..2fab87e86561 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -722,6 +722,7 @@ static int adapter_up(struct adapter *adapter) if (adapter->flags & USING_MSIX) name_msix_vecs(adapter); + adapter->flags |= FULL_INIT_DONE; } @@ -747,8 +748,6 @@ static int adapter_up(struct adapter *adapter) enable_rx(adapter); t4vf_sge_start(adapter); - /* Initialize hash mac addr list*/ - INIT_LIST_HEAD(&adapter->mac_hlist); return 0; } @@ -2324,19 +2323,7 @@ static int resources_show(struct seq_file *seq, void *v) return 0; } - -static int resources_open(struct inode *inode, struct file *file) -{ - return single_open(file, resources_show, inode->i_private); -} - -static const struct file_operations resources_proc_fops = { - .owner = THIS_MODULE, - .open = resources_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(resources); /* * Show Virtual Interfaces. @@ -2420,7 +2407,7 @@ static struct cxgb4vf_debugfs_entry debugfs_files[] = { { "mboxlog", 0444, &mboxlog_fops }, { "sge_qinfo", 0444, &sge_qinfo_debugfs_fops }, { "sge_qstats", 0444, &sge_qstats_proc_fops }, - { "resources", 0444, &resources_proc_fops }, + { "resources", 0444, &resources_fops }, { "interfaces", 0444, &interfaces_proc_fops }, }; @@ -3036,6 +3023,9 @@ static int cxgb4vf_pci_probe(struct pci_dev *pdev, if (err) goto err_unmap_bar; + /* Initialize hash mac addr list */ + INIT_LIST_HEAD(&adapter->mac_hlist); + /* * Allocate our "adapter ports" and stitch everything together. */ @@ -3287,6 +3277,7 @@ err_disable_device: static void cxgb4vf_pci_remove(struct pci_dev *pdev) { struct adapter *adapter = pci_get_drvdata(pdev); + struct hash_mac_addr *entry, *tmp; /* * Tear down driver state associated with device. @@ -3337,6 +3328,11 @@ static void cxgb4vf_pci_remove(struct pci_dev *pdev) if (!is_t4(adapter->params.chip)) iounmap(adapter->bar2); kfree(adapter->mbox_log); + list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, + list) { + list_del(&entry->list); + kfree(entry); + } kfree(adapter); } diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig index ec0b545197e2..e9a0213b08c4 100644 --- a/drivers/net/ethernet/cirrus/Kconfig +++ b/drivers/net/ethernet/cirrus/Kconfig @@ -23,7 +23,7 @@ config CS89x0 ---help--- Support for CS89x0 chipset based Ethernet cards. If you have a network (Ethernet) card of this type, say Y and read the file - <file:Documentation/networking/cs89x0.txt>. + <file:Documentation/networking/device_drivers/cirrus/cs89x0.txt>. To compile this driver as a module, choose M here. The module will be called cs89x0. 
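/* Lifetime note for the mac_hlist changes in this patch (both cxgb4 and
 * cxgb4vf): the list is now initialized once at probe time and drained at
 * remove time, instead of being re-initialized on every up path --
 * presumably so an ifdown/ifup cycle neither leaks entries nor re-inits a
 * non-empty list. Draining uses the _safe iterator, required when nodes
 * are freed during the walk:
 *
 *	list_for_each_entry_safe(entry, tmp, &adapter->mac_hlist, list) {
 *		list_del(&entry->list);
 *		kfree(entry);
 *	}
 */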
diff --git a/drivers/net/ethernet/cisco/enic/enic_ethtool.c b/drivers/net/ethernet/cisco/enic/enic_ethtool.c index f42f7a6e1559..ebd5c2cf1efe 100644 --- a/drivers/net/ethernet/cisco/enic/enic_ethtool.c +++ b/drivers/net/ethernet/cisco/enic/enic_ethtool.c @@ -241,7 +241,7 @@ static int enic_set_ringparam(struct net_device *netdev, } enic_init_vnic_resources(enic); if (running) { - err = dev_open(netdev); + err = dev_open(netdev, NULL); if (err) goto err_out; } diff --git a/drivers/net/ethernet/dec/tulip/Kconfig b/drivers/net/ethernet/dec/tulip/Kconfig index 1003201b5d80..264e9b413e94 100644 --- a/drivers/net/ethernet/dec/tulip/Kconfig +++ b/drivers/net/ethernet/dec/tulip/Kconfig @@ -113,7 +113,7 @@ config DE4X5 These include the DE425, DE434, DE435, DE450 and DE500 models. If you have a network card of this type, say Y. More specific information is contained in - <file:Documentation/networking/de4x5.txt>. + <file:Documentation/networking/device_drivers/dec/de4x5.txt>. To compile this driver as a module, choose M here. The module will be called de4x5. @@ -137,7 +137,7 @@ config DM9102 This driver is for DM9102(A)/DM9132/DM9801 compatible PCI cards from Davicom (<http://www.davicom.com.tw/>). If you have such a network (Ethernet) card, say Y. Some information is contained in the file - <file:Documentation/networking/dmfe.txt>. + <file:Documentation/networking/device_drivers/dec/dmfe.txt>. To compile this driver as a module, choose M here. The module will be called dmfe. diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c index f0536b16b3c3..d8d423f22c4f 100644 --- a/drivers/net/ethernet/dlink/dl2k.c +++ b/drivers/net/ethernet/dlink/dl2k.c @@ -1881,7 +1881,7 @@ Compile command: gcc -D__KERNEL__ -DMODULE -I/usr/src/linux/include -Wall -Wstrict-prototypes -O2 -c dl2k.c -Read Documentation/networking/dl2k.txt for details. +Read Documentation/networking/device_drivers/dlink/dl2k.txt for details. 
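/* dev_open() gained a struct netlink_ext_ack * parameter in this series;
 * callers with no netlink request context, like the enic ethtool path
 * above, pass NULL. A caller that does have one just threads it through,
 * e.g. this hypothetical helper:
 */
static int foo_reopen(struct net_device *dev, struct netlink_ext_ack *extack)
{
	return dev_open(dev, extack);	/* failure messages reach the requester */
}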
*/ diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index c5ad7a4f4d83..852f5bfe5f6d 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -796,7 +796,7 @@ static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter, u16 vlan_tag; vlan_tag = skb_vlan_tag_get(skb); - vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT; + vlan_prio = skb_vlan_tag_get_prio(skb); /* If vlan priority provided by OS is NOT in available bmap */ if (!(adapter->vlan_prio_bmap & (1 << vlan_prio))) vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) | @@ -1049,30 +1049,35 @@ static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter, struct be_wrb_params *wrb_params) { + bool insert_vlan = false; u16 vlan_tag = 0; skb = skb_share_check(skb, GFP_ATOMIC); if (unlikely(!skb)) return skb; - if (skb_vlan_tag_present(skb)) + if (skb_vlan_tag_present(skb)) { vlan_tag = be_get_tx_vlan_tag(adapter, skb); + insert_vlan = true; + } if (qnq_async_evt_rcvd(adapter) && adapter->pvid) { - if (!vlan_tag) + if (!insert_vlan) { vlan_tag = adapter->pvid; + insert_vlan = true; + } /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to * skip VLAN insertion */ BE_WRB_F_SET(wrb_params->features, VLAN_SKIP_HW, 1); } - if (vlan_tag) { + if (insert_vlan) { skb = vlan_insert_tag_set_proto(skb, htons(ETH_P_8021Q), vlan_tag); if (unlikely(!skb)) return skb; - skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(skb); } /* Insert the outer VLAN, if any */ @@ -4950,7 +4955,7 @@ fw_exit: } static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags) + u16 flags, struct netlink_ext_ack *extack) { struct be_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 6e0f47f2c8a3..f53090cde041 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -51,9 +51,9 @@ #include <linux/percpu.h> #include <linux/dma-mapping.h> #include <linux/sort.h> +#include <linux/phy_fixed.h> #include <soc/fsl/bman.h> #include <soc/fsl/qman.h> - #include "fman.h" #include "fman_port.h" #include "mac.h" @@ -2475,6 +2475,7 @@ static void dpaa_adjust_link(struct net_device *net_dev) static int dpaa_phy_init(struct net_device *net_dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct mac_device *mac_dev; struct phy_device *phy_dev; struct dpaa_priv *priv; @@ -2491,7 +2492,9 @@ static int dpaa_phy_init(struct net_device *net_dev) } /* Remove any features not supported by the controller */ - phy_dev->supported &= mac_dev->if_support; + ethtool_convert_legacy_u32_to_link_mode(mask, mac_dev->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, mask); + phy_support_asym_pause(phy_dev); mac_dev->phy_dev = phy_dev; @@ -2613,6 +2616,7 @@ static const struct net_device_ops dpaa_ops = { .ndo_stop = dpaa_eth_stop, .ndo_tx_timeout = dpaa_tx_timeout, .ndo_get_stats64 = dpaa_get_stats64, + .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = dpaa_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_set_rx_mode = dpaa_set_rx_mode, diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c index 13d6e2272ece..62497119c85f 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c @@ -529,6 
+529,75 @@ static int dpaa_get_ts_info(struct net_device *net_dev, return 0; } +static int dpaa_get_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + struct qman_portal *portal; + u32 period; + u8 thresh; + + portal = qman_get_affine_portal(smp_processor_id()); + qman_portal_get_iperiod(portal, &period); + qman_dqrr_get_ithresh(portal, &thresh); + + c->rx_coalesce_usecs = period; + c->rx_max_coalesced_frames = thresh; + c->use_adaptive_rx_coalesce = false; + + return 0; +} + +static int dpaa_set_coalesce(struct net_device *dev, + struct ethtool_coalesce *c) +{ + const cpumask_t *cpus = qman_affine_cpus(); + bool needs_revert[NR_CPUS] = {false}; + struct qman_portal *portal; + u32 period, prev_period; + u8 thresh, prev_thresh; + int cpu, res; + + if (c->use_adaptive_rx_coalesce) + return -EINVAL; + + period = c->rx_coalesce_usecs; + thresh = c->rx_max_coalesced_frames; + + /* save previous values */ + portal = qman_get_affine_portal(smp_processor_id()); + qman_portal_get_iperiod(portal, &prev_period); + qman_dqrr_get_ithresh(portal, &prev_thresh); + + /* set new values */ + for_each_cpu(cpu, cpus) { + portal = qman_get_affine_portal(cpu); + res = qman_portal_set_iperiod(portal, period); + if (res) + goto revert_values; + res = qman_dqrr_set_ithresh(portal, thresh); + if (res) { + qman_portal_set_iperiod(portal, prev_period); + goto revert_values; + } + needs_revert[cpu] = true; + } + + return 0; + +revert_values: + /* restore previous values */ + for_each_cpu(cpu, cpus) { + if (!needs_revert[cpu]) + continue; + portal = qman_get_affine_portal(cpu); + /* previous values will not fail, ignore return value */ + qman_portal_set_iperiod(portal, prev_period); + qman_dqrr_set_ithresh(portal, prev_thresh); + } + + return res; +} + const struct ethtool_ops dpaa_ethtool_ops = { .get_drvinfo = dpaa_get_drvinfo, .get_msglevel = dpaa_get_msglevel, @@ -545,4 +614,6 @@ const struct ethtool_ops dpaa_ethtool_ops = { .get_rxnfc = dpaa_get_rxnfc, .set_rxnfc = dpaa_set_rxnfc, .get_ts_info = dpaa_get_ts_info, + .get_coalesce = dpaa_get_coalesce, + .set_coalesce = dpaa_set_coalesce, }; diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c index 88f7acce38dc..1ca9a18139ec 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c @@ -13,7 +13,8 @@ #include <linux/iommu.h> #include <linux/net_tstamp.h> #include <linux/fsl/mc.h> - +#include <linux/bpf.h> +#include <linux/bpf_trace.h> #include <net/sock.h> #include "dpaa2-eth.h" @@ -86,7 +87,7 @@ static void free_rx_fd(struct dpaa2_eth_priv *priv, addr = dpaa2_sg_get_addr(&sgt[i]); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); + DMA_BIDIRECTIONAL); skb_free_frag(sg_vaddr); if (dpaa2_sg_is_final(&sgt[i])) @@ -144,7 +145,7 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, sg_addr = dpaa2_sg_get_addr(sge); sg_vaddr = dpaa2_iova_to_virt(priv->iommu_domain, sg_addr); dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); + DMA_BIDIRECTIONAL); sg_length = dpaa2_sg_get_len(sge); @@ -199,12 +200,148 @@ static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, return skb; } +/* Free buffers acquired from the buffer pool or which were meant to + * be released in the pool + */ +static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) +{ + struct device *dev = priv->net_dev->dev.parent; + void 
*vaddr; + int i; + + for (i = 0; i < count; i++) { + vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); + dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); + skb_free_frag(vaddr); + } +} + +static void xdp_release_buf(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + dma_addr_t addr) +{ + int err; + + ch->xdp.drop_bufs[ch->xdp.drop_cnt++] = addr; + if (ch->xdp.drop_cnt < DPAA2_ETH_BUFS_PER_CMD) + return; + + while ((err = dpaa2_io_service_release(ch->dpio, priv->bpid, + ch->xdp.drop_bufs, + ch->xdp.drop_cnt)) == -EBUSY) + cpu_relax(); + + if (err) { + free_bufs(priv, ch->xdp.drop_bufs, ch->xdp.drop_cnt); + ch->buf_count -= ch->xdp.drop_cnt; + } + + ch->xdp.drop_cnt = 0; +} + +static int xdp_enqueue(struct dpaa2_eth_priv *priv, struct dpaa2_fd *fd, + void *buf_start, u16 queue_id) +{ + struct dpaa2_eth_fq *fq; + struct dpaa2_faead *faead; + u32 ctrl, frc; + int i, err; + + /* Mark the egress frame hardware annotation area as valid */ + frc = dpaa2_fd_get_frc(fd); + dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); + dpaa2_fd_set_ctrl(fd, DPAA2_FD_CTRL_ASAL); + + /* Instruct hardware to release the FD buffer directly into + * the buffer pool once transmission is completed, instead of + * sending a Tx confirmation frame to us + */ + ctrl = DPAA2_FAEAD_A4V | DPAA2_FAEAD_A2V | DPAA2_FAEAD_EBDDV; + faead = dpaa2_get_faead(buf_start, false); + faead->ctrl = cpu_to_le32(ctrl); + faead->conf_fqid = 0; + + fq = &priv->fq[queue_id]; + for (i = 0; i < DPAA2_ETH_ENQUEUE_RETRIES; i++) { + err = dpaa2_io_service_enqueue_qd(fq->channel->dpio, + priv->tx_qdid, 0, + fq->tx_qdbin, fd); + if (err != -EBUSY) + break; + } + + return err; +} + +static u32 run_xdp(struct dpaa2_eth_priv *priv, + struct dpaa2_eth_channel *ch, + struct dpaa2_eth_fq *rx_fq, + struct dpaa2_fd *fd, void *vaddr) +{ + dma_addr_t addr = dpaa2_fd_get_addr(fd); + struct rtnl_link_stats64 *percpu_stats; + struct bpf_prog *xdp_prog; + struct xdp_buff xdp; + u32 xdp_act = XDP_PASS; + int err; + + percpu_stats = this_cpu_ptr(priv->percpu_stats); + + rcu_read_lock(); + + xdp_prog = READ_ONCE(ch->xdp.prog); + if (!xdp_prog) + goto out; + + xdp.data = vaddr + dpaa2_fd_get_offset(fd); + xdp.data_end = xdp.data + dpaa2_fd_get_len(fd); + xdp.data_hard_start = xdp.data - XDP_PACKET_HEADROOM; + xdp_set_data_meta_invalid(&xdp); + + xdp_act = bpf_prog_run_xdp(xdp_prog, &xdp); + + /* xdp.data pointer may have changed */ + dpaa2_fd_set_offset(fd, xdp.data - vaddr); + dpaa2_fd_set_len(fd, xdp.data_end - xdp.data); + + switch (xdp_act) { + case XDP_PASS: + break; + case XDP_TX: + err = xdp_enqueue(priv, fd, vaddr, rx_fq->flowid); + if (err) { + xdp_release_buf(priv, ch, addr); + percpu_stats->tx_errors++; + ch->stats.xdp_tx_err++; + } else { + percpu_stats->tx_packets++; + percpu_stats->tx_bytes += dpaa2_fd_get_len(fd); + ch->stats.xdp_tx++; + } + break; + default: + bpf_warn_invalid_xdp_action(xdp_act); + /* fall through */ + case XDP_ABORTED: + trace_xdp_exception(priv->net_dev, xdp_prog, xdp_act); + /* fall through */ + case XDP_DROP: + xdp_release_buf(priv, ch, addr); + ch->stats.xdp_drop++; + break; + } + +out: + rcu_read_unlock(); + return xdp_act; +} + /* Main Rx frame processing routine */ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch, const struct dpaa2_fd *fd, - struct napi_struct *napi, - u16 queue_id) + struct dpaa2_eth_fq *fq) { dma_addr_t addr = dpaa2_fd_get_addr(fd); u8 fd_format = dpaa2_fd_get_format(fd); @@ -216,12 +353,14 @@ static void 
dpaa2_eth_rx(struct dpaa2_eth_priv *priv, struct dpaa2_fas *fas; void *buf_data; u32 status = 0; + u32 xdp_act; /* Tracing point */ trace_dpaa2_rx_fd(priv->net_dev, fd); vaddr = dpaa2_iova_to_virt(priv->iommu_domain, addr); - dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); fas = dpaa2_get_fas(vaddr, false); prefetch(fas); @@ -232,8 +371,21 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, percpu_extras = this_cpu_ptr(priv->percpu_extras); if (fd_format == dpaa2_fd_single) { + xdp_act = run_xdp(priv, ch, fq, (struct dpaa2_fd *)fd, vaddr); + if (xdp_act != XDP_PASS) { + percpu_stats->rx_packets++; + percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); + return; + } + + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_linear_skb(ch, fd, vaddr); } else if (fd_format == dpaa2_fd_sg) { + WARN_ON(priv->xdp_prog); + + dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, + DMA_BIDIRECTIONAL); skb = build_frag_skb(priv, ch, buf_data); skb_free_frag(vaddr); percpu_extras->rx_sg_frames++; @@ -267,12 +419,12 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, } skb->protocol = eth_type_trans(skb, priv->net_dev); - skb_record_rx_queue(skb, queue_id); + skb_record_rx_queue(skb, fq->flowid); percpu_stats->rx_packets++; percpu_stats->rx_bytes += dpaa2_fd_get_len(fd); - napi_gro_receive(napi, skb); + napi_gro_receive(&ch->napi, skb); return; @@ -289,7 +441,7 @@ err_frame_format: * Observance of NAPI budget is not our concern, leaving that to the caller. */ static int consume_frames(struct dpaa2_eth_channel *ch, - enum dpaa2_eth_fq_type *type) + struct dpaa2_eth_fq **src) { struct dpaa2_eth_priv *priv = ch->priv; struct dpaa2_eth_fq *fq = NULL; @@ -312,7 +464,7 @@ static int consume_frames(struct dpaa2_eth_channel *ch, fd = dpaa2_dq_fd(dq); fq = (struct dpaa2_eth_fq *)(uintptr_t)dpaa2_dq_fqd_ctx(dq); - fq->consume(priv, ch, fd, &ch->napi, fq->flowid); + fq->consume(priv, ch, fd, fq); cleaned++; } while (!is_last); @@ -320,13 +472,12 @@ static int consume_frames(struct dpaa2_eth_channel *ch, return 0; fq->stats.frames += cleaned; - ch->stats.frames += cleaned; /* A dequeue operation only pulls frames from a single queue - * into the store. Return the frame queue type as an out param. + * into the store. Return the frame queue as an out param. 
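/* Returning the FQ itself (not just its type) lets the poll loop below
 * credit byte queue limits per Tx-confirmation queue. The BQL pairing the
 * driver adopts in the following hunks is the standard one:
 *
 *	xmit:        netdev_tx_sent_queue(nq, fd_len);
 *	completion:  netdev_tx_completed_queue(nq, frames, bytes);
 *
 * where nq = netdev_get_tx_queue(net_dev, queue index).
 */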
*/ - if (type) - *type = fq->type; + if (src) + *src = fq; return cleaned; } @@ -571,8 +722,10 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; struct dpaa2_eth_fq *fq; + struct netdev_queue *nq; u16 queue_mapping; unsigned int needed_headroom; + u32 fd_len; int err, i; percpu_stats = this_cpu_ptr(priv->percpu_stats); @@ -644,8 +797,12 @@ static netdev_tx_t dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) /* Clean up everything, including freeing the skb */ free_tx_fd(priv, &fd); } else { + fd_len = dpaa2_fd_get_len(&fd); percpu_stats->tx_packets++; - percpu_stats->tx_bytes += dpaa2_fd_get_len(&fd); + percpu_stats->tx_bytes += fd_len; + + nq = netdev_get_tx_queue(net_dev, queue_mapping); + netdev_tx_sent_queue(nq, fd_len); } return NETDEV_TX_OK; @@ -661,11 +818,11 @@ err_alloc_headroom: static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch __always_unused, const struct dpaa2_fd *fd, - struct napi_struct *napi __always_unused, - u16 queue_id __always_unused) + struct dpaa2_eth_fq *fq) { struct rtnl_link_stats64 *percpu_stats; struct dpaa2_eth_drv_stats *percpu_extras; + u32 fd_len = dpaa2_fd_get_len(fd); u32 fd_errors; /* Tracing point */ @@ -673,7 +830,10 @@ static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, percpu_extras = this_cpu_ptr(priv->percpu_extras); percpu_extras->tx_conf_frames++; - percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); + percpu_extras->tx_conf_bytes += fd_len; + + fq->dq_frames++; + fq->dq_bytes += fd_len; /* Check frame errors in the FD field */ fd_errors = dpaa2_fd_get_ctrl(fd) & DPAA2_FD_TX_ERR_MASK; @@ -735,23 +895,6 @@ static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) return 0; } -/* Free buffers acquired from the buffer pool or which were meant to - * be released in the pool - */ -static void free_bufs(struct dpaa2_eth_priv *priv, u64 *buf_array, int count) -{ - struct device *dev = priv->net_dev->dev.parent; - void *vaddr; - int i; - - for (i = 0; i < count; i++) { - vaddr = dpaa2_iova_to_virt(priv->iommu_domain, buf_array[i]); - dma_unmap_single(dev, buf_array[i], DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); - skb_free_frag(vaddr); - } -} - /* Perform a single release command to add buffers * to the specified buffer pool */ @@ -775,7 +918,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, buf = PTR_ALIGN(buf, priv->rx_buf_align); addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, - DMA_FROM_DEVICE); + DMA_BIDIRECTIONAL); if (unlikely(dma_mapping_error(dev, addr))) goto err_map; @@ -934,8 +1077,9 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) struct dpaa2_eth_channel *ch; struct dpaa2_eth_priv *priv; int rx_cleaned = 0, txconf_cleaned = 0; - enum dpaa2_eth_fq_type type = 0; - int store_cleaned; + struct dpaa2_eth_fq *fq, *txc_fq = NULL; + struct netdev_queue *nq; + int store_cleaned, work_done; int err; ch = container_of(napi, struct dpaa2_eth_channel, napi); @@ -949,18 +1093,25 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) /* Refill pool if appropriate */ refill_pool(priv, ch, priv->bpid); - store_cleaned = consume_frames(ch, &type); - if (type == DPAA2_RX_FQ) + store_cleaned = consume_frames(ch, &fq); + if (!store_cleaned) + break; + if (fq->type == DPAA2_RX_FQ) { rx_cleaned += store_cleaned; - else + } else { txconf_cleaned += store_cleaned; + /* We have a single Tx conf FQ on this channel */ + txc_fq = fq; + } /* If we 
either consumed the whole NAPI budget with Rx frames * or we reached the Tx confirmations threshold, we're done. */ if (rx_cleaned >= budget || - txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) - return budget; + txconf_cleaned >= DPAA2_ETH_TXCONF_PER_NAPI) { + work_done = budget; + goto out; + } } while (store_cleaned); /* We didn't consume the entire budget, so finish napi and @@ -974,7 +1125,18 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) WARN_ONCE(err, "CDAN notifications rearm failed on core %d", ch->nctx.desired_cpu); - return max(rx_cleaned, 1); + work_done = max(rx_cleaned, 1); + +out: + if (txc_fq) { + nq = netdev_get_tx_queue(priv->net_dev, txc_fq->flowid); + netdev_tx_completed_queue(nq, txc_fq->dq_frames, + txc_fq->dq_bytes); + txc_fq->dq_frames = 0; + txc_fq->dq_bytes = 0; + } + + return work_done; } static void enable_ch_napi(struct dpaa2_eth_priv *priv) @@ -1400,6 +1562,174 @@ static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) return -EINVAL; } +static bool xdp_mtu_valid(struct dpaa2_eth_priv *priv, int mtu) +{ + int mfl, linear_mfl; + + mfl = DPAA2_ETH_L2_MAX_FRM(mtu); + linear_mfl = DPAA2_ETH_RX_BUF_SIZE - DPAA2_ETH_RX_HWA_SIZE - + dpaa2_eth_rx_head_room(priv) - XDP_PACKET_HEADROOM; + + if (mfl > linear_mfl) { + netdev_warn(priv->net_dev, "Maximum MTU for XDP is %d\n", + linear_mfl - VLAN_ETH_HLEN); + return false; + } + + return true; +} + +static int set_rx_mfl(struct dpaa2_eth_priv *priv, int mtu, bool has_xdp) +{ + int mfl, err; + + /* We enforce a maximum Rx frame length based on MTU only if we have + * an XDP program attached (in order to avoid Rx S/G frames). + * Otherwise, we accept all incoming frames as long as they are not + * larger than maximum size supported in hardware + */ + if (has_xdp) + mfl = DPAA2_ETH_L2_MAX_FRM(mtu); + else + mfl = DPAA2_ETH_MFL; + + err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, mfl); + if (err) { + netdev_err(priv->net_dev, "dpni_set_max_frame_length failed\n"); + return err; + } + + return 0; +} + +static int dpaa2_eth_change_mtu(struct net_device *dev, int new_mtu) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + int err; + + if (!priv->xdp_prog) + goto out; + + if (!xdp_mtu_valid(priv, new_mtu)) + return -EINVAL; + + err = set_rx_mfl(priv, new_mtu, true); + if (err) + return err; + +out: + dev->mtu = new_mtu; + return 0; +} + +static int update_rx_buffer_headroom(struct dpaa2_eth_priv *priv, bool has_xdp) +{ + struct dpni_buffer_layout buf_layout = {0}; + int err; + + err = dpni_get_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, &buf_layout); + if (err) { + netdev_err(priv->net_dev, "dpni_get_buffer_layout failed\n"); + return err; + } + + /* Reserve extra headroom for XDP header size changes */ + buf_layout.data_head_room = dpaa2_eth_rx_head_room(priv) + + (has_xdp ? 
XDP_PACKET_HEADROOM : 0); + buf_layout.options = DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM; + err = dpni_set_buffer_layout(priv->mc_io, 0, priv->mc_token, + DPNI_QUEUE_RX, &buf_layout); + if (err) { + netdev_err(priv->net_dev, "dpni_set_buffer_layout failed\n"); + return err; + } + + return 0; +} + +static int setup_xdp(struct net_device *dev, struct bpf_prog *prog) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + struct dpaa2_eth_channel *ch; + struct bpf_prog *old; + bool up, need_update; + int i, err; + + if (prog && !xdp_mtu_valid(priv, dev->mtu)) + return -EINVAL; + + if (prog) { + prog = bpf_prog_add(prog, priv->num_channels); + if (IS_ERR(prog)) + return PTR_ERR(prog); + } + + up = netif_running(dev); + need_update = (!!priv->xdp_prog != !!prog); + + if (up) + dpaa2_eth_stop(dev); + + /* While in xdp mode, enforce a maximum Rx frame size based on MTU. + * Also, when switching between xdp/non-xdp modes we need to reconfigure + * our Rx buffer layout. Buffer pool was drained on dpaa2_eth_stop, + * so we are sure no old format buffers will be used from now on. + */ + if (need_update) { + err = set_rx_mfl(priv, dev->mtu, !!prog); + if (err) + goto out_err; + err = update_rx_buffer_headroom(priv, !!prog); + if (err) + goto out_err; + } + + old = xchg(&priv->xdp_prog, prog); + if (old) + bpf_prog_put(old); + + for (i = 0; i < priv->num_channels; i++) { + ch = priv->channel[i]; + old = xchg(&ch->xdp.prog, prog); + if (old) + bpf_prog_put(old); + } + + if (up) { + err = dpaa2_eth_open(dev); + if (err) + return err; + } + + return 0; + +out_err: + if (prog) + bpf_prog_sub(prog, priv->num_channels); + if (up) + dpaa2_eth_open(dev); + + return err; +} + +static int dpaa2_eth_xdp(struct net_device *dev, struct netdev_bpf *xdp) +{ + struct dpaa2_eth_priv *priv = netdev_priv(dev); + + switch (xdp->command) { + case XDP_SETUP_PROG: + return setup_xdp(dev, xdp->prog); + case XDP_QUERY_PROG: + xdp->prog_id = priv->xdp_prog ? 
priv->xdp_prog->aux->id : 0; + break; + default: + return -EINVAL; + } + + return 0; +} + static const struct net_device_ops dpaa2_eth_ops = { .ndo_open = dpaa2_eth_open, .ndo_start_xmit = dpaa2_eth_tx, @@ -1409,6 +1739,8 @@ static const struct net_device_ops dpaa2_eth_ops = { .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, .ndo_set_features = dpaa2_eth_set_features, .ndo_do_ioctl = dpaa2_eth_ioctl, + .ndo_change_mtu = dpaa2_eth_change_mtu, + .ndo_bpf = dpaa2_eth_xdp, }; static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) @@ -1434,8 +1766,11 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPCON, &dpcon); if (err) { - dev_info(dev, "Not enough DPCONs, will go on as-is\n"); - return NULL; + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_info(dev, "Not enough DPCONs, will go on as-is\n"); + return ERR_PTR(err); } err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); @@ -1493,8 +1828,10 @@ alloc_channel(struct dpaa2_eth_priv *priv) return NULL; channel->dpcon = setup_dpcon(priv); - if (!channel->dpcon) + if (IS_ERR_OR_NULL(channel->dpcon)) { + err = PTR_ERR(channel->dpcon); goto err_setup; + } err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, &attr); @@ -1513,7 +1850,7 @@ err_get_attr: free_dpcon(priv, channel->dpcon); err_setup: kfree(channel); - return NULL; + return ERR_PTR(err); } static void free_channel(struct dpaa2_eth_priv *priv, @@ -1547,10 +1884,11 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) for_each_online_cpu(i) { /* Try to allocate a channel */ channel = alloc_channel(priv); - if (!channel) { - dev_info(dev, - "No affine channel for cpu %d and above\n", i); - err = -ENODEV; + if (IS_ERR_OR_NULL(channel)) { + err = PTR_ERR(channel); + if (err != -EPROBE_DEFER) + dev_info(dev, + "No affine channel for cpu %d and above\n", i); goto err_alloc_ch; } @@ -1597,7 +1935,7 @@ static int setup_dpio(struct dpaa2_eth_priv *priv) /* Stop if we already have enough channels to accommodate all * RX and TX conf queues */ - if (priv->num_channels == dpaa2_eth_queue_count(priv)) + if (priv->num_channels == priv->dpni_attrs.num_queues) break; } @@ -1608,9 +1946,12 @@ err_set_cdan: err_service_reg: free_channel(priv, channel); err_alloc_ch: + if (err == -EPROBE_DEFER) + return err; + if (cpumask_empty(&priv->dpio_cpumask)) { dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); - return err; + return -ENODEV; } dev_info(dev, "Cores %*pbl available for processing ingress traffic\n", @@ -1732,7 +2073,10 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, &dpbp_dev); if (err) { - dev_err(dev, "DPBP device allocation failed\n"); + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "DPBP device allocation failed\n"); return err; } diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h index 452a8e9c4f0e..69c965de192b 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.h @@ -139,7 +139,9 @@ struct dpaa2_faead { }; #define DPAA2_FAEAD_A2V 0x20000000 +#define DPAA2_FAEAD_A4V 0x08000000 #define DPAA2_FAEAD_UPDV 0x00001000 +#define DPAA2_FAEAD_EBDDV 0x00002000 #define DPAA2_FAEAD_UPD 0x00000010 /* Accessors for the hardware annotation fields that we use */ @@ -243,12 +245,14 @@ struct dpaa2_eth_fq_stats { struct dpaa2_eth_ch_stats { /* Volatile dequeues retried due to 
portal busy */ __u64 dequeue_portal_busy; - /* Number of CDANs; useful to estimate avg NAPI len */ - __u64 cdan; - /* Number of frames received on queues from this channel */ - __u64 frames; /* Pull errors */ __u64 pull_err; + /* Number of CDANs; useful to estimate avg NAPI len */ + __u64 cdan; + /* XDP counters */ + __u64 xdp_drop; + __u64 xdp_tx; + __u64 xdp_tx_err; }; /* Maximum number of queues associated with a DPNI */ @@ -271,17 +275,24 @@ struct dpaa2_eth_fq { u32 tx_qdbin; u16 flowid; int target_cpu; + u32 dq_frames; + u32 dq_bytes; struct dpaa2_eth_channel *channel; enum dpaa2_eth_fq_type type; void (*consume)(struct dpaa2_eth_priv *priv, struct dpaa2_eth_channel *ch, const struct dpaa2_fd *fd, - struct napi_struct *napi, - u16 queue_id); + struct dpaa2_eth_fq *fq); struct dpaa2_eth_fq_stats stats; }; +struct dpaa2_eth_ch_xdp { + struct bpf_prog *prog; + u64 drop_bufs[DPAA2_ETH_BUFS_PER_CMD]; + int drop_cnt; +}; + struct dpaa2_eth_channel { struct dpaa2_io_notification_ctx nctx; struct fsl_mc_device *dpcon; @@ -293,6 +304,7 @@ struct dpaa2_eth_channel { struct dpaa2_eth_priv *priv; int buf_count; struct dpaa2_eth_ch_stats stats; + struct dpaa2_eth_ch_xdp xdp; }; struct dpaa2_eth_dist_fields { @@ -352,6 +364,7 @@ struct dpaa2_eth_priv { u64 rx_hash_fields; struct dpaa2_eth_cls_rule *cls_rules; u8 rx_cls_enabled; + struct bpf_prog *xdp_prog; }; #define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ @@ -434,9 +447,10 @@ static inline unsigned int dpaa2_eth_rx_head_room(struct dpaa2_eth_priv *priv) DPAA2_ETH_RX_HWA_SIZE; } +/* We have exactly one {Rx, Tx conf} queue per channel */ static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) { - return priv->dpni_attrs.num_queues; + return priv->num_channels; } int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c index 26bd5a2bd8ed..a7389e722c49 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ethtool.c @@ -45,6 +45,15 @@ static char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { "[drv] dequeue portal busy", "[drv] channel pull errors", "[drv] cdan", + "[drv] xdp drop", + "[drv] xdp tx", + "[drv] xdp tx errors", + /* FQ stats */ + "[qbman] rx pending frames", + "[qbman] rx pending bytes", + "[qbman] tx conf pending frames", + "[qbman] tx conf pending bytes", + "[qbman] buffer count", }; #define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) @@ -174,8 +183,10 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, int j, k, err; int num_cnt; union dpni_statistics dpni_stats; - u64 cdan = 0; - u64 portal_busy = 0, pull_err = 0; + u32 fcnt, bcnt; + u32 fcnt_rx_total = 0, fcnt_tx_total = 0; + u32 bcnt_rx_total = 0, bcnt_tx_total = 0; + u32 buf_cnt; struct dpaa2_eth_priv *priv = netdev_priv(net_dev); struct dpaa2_eth_drv_stats *extras; struct dpaa2_eth_ch_stats *ch_stats; @@ -212,16 +223,43 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, } i += j; - for (j = 0; j < priv->num_channels; j++) { - ch_stats = &priv->channel[j]->stats; - cdan += ch_stats->cdan; - portal_busy += ch_stats->dequeue_portal_busy; - pull_err += ch_stats->pull_err; + /* Per-channel stats */ + for (k = 0; k < priv->num_channels; k++) { + ch_stats = &priv->channel[k]->stats; + for (j = 0; j < sizeof(*ch_stats) / sizeof(__u64); j++) + *((__u64 *)data + i + j) += *((__u64 *)ch_stats + j); } + i += j; + + for (j = 0; j < 
priv->num_fqs; j++) { + /* Print FQ instantaneous counts */ + err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, + &fcnt, &bcnt); + if (err) { + netdev_warn(net_dev, "FQ query error %d", err); + return; + } - *(data + i++) = portal_busy; - *(data + i++) = pull_err; - *(data + i++) = cdan; + if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { + fcnt_tx_total += fcnt; + bcnt_tx_total += bcnt; + } else { + fcnt_rx_total += fcnt; + bcnt_rx_total += bcnt; + } + } + + *(data + i++) = fcnt_rx_total; + *(data + i++) = bcnt_rx_total; + *(data + i++) = fcnt_tx_total; + *(data + i++) = bcnt_tx_total; + + err = dpaa2_io_query_bp_count(NULL, priv->bpid, &buf_cnt); + if (err) { + netdev_warn(net_dev, "Buffer count query error %d\n", err); + return; + } + *(data + i++) = buf_cnt; } static int prep_eth_rule(struct ethhdr *eth_value, struct ethhdr *eth_mask, diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c index 84b942b1eccc..9b150db3b510 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-ptp.c @@ -140,7 +140,10 @@ static int dpaa2_ptp_probe(struct fsl_mc_device *mc_dev) err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); if (err) { - dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); + if (err == -ENXIO) + err = -EPROBE_DEFER; + else + dev_err(dev, "fsl_mc_portal_allocate err %d\n", err); goto err_exit; } diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index bf80855dd0dd..f79e57f735b3 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -531,7 +531,6 @@ struct fec_enet_private { /* Phylib and MDIO interface */ struct mii_bus *mii_bus; - int mii_timeout; uint phy_speed; phy_interface_t phy_interface; struct device_node *phy_node; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 6db69ba30dcd..ae0f88bce9aa 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1714,12 +1714,6 @@ static void fec_enet_adjust_link(struct net_device *ndev) struct phy_device *phy_dev = ndev->phydev; int status_change = 0; - /* Prevent a state halted on mii error */ - if (fep->mii_timeout && phy_dev->state == PHY_HALTED) { - phy_dev->state = PHY_RESUMING; - return; - } - /* * If the netdev is down, or is going down, we're not interested * in link state events, so just mark our idea of the link as down @@ -1779,7 +1773,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) if (ret < 0) return ret; - fep->mii_timeout = 0; reinit_completion(&fep->mdio_done); /* start a read op */ @@ -1791,7 +1784,6 @@ static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) time_left = wait_for_completion_timeout(&fep->mdio_done, usecs_to_jiffies(FEC_MII_TIMEOUT)); if (time_left == 0) { - fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); ret = -ETIMEDOUT; goto out; @@ -1820,7 +1812,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, else ret = 0; - fep->mii_timeout = 0; reinit_completion(&fep->mdio_done); /* start a write op */ @@ -1833,7 +1824,6 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, time_left = wait_for_completion_timeout(&fep->mdio_done, usecs_to_jiffies(FEC_MII_TIMEOUT)); if (time_left == 0) { - fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); ret = -ETIMEDOUT; } @@ -2001,8 +1991,6 
@@ static int fec_enet_mii_init(struct platform_device *pdev) return -ENOENT; } - fep->mii_timeout = 0; - /* * Set MII speed to 2.5 MHz (= clk_get_rate() / 2 * phy_speed) * diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index d79e4e009d63..71f4205f14e7 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -393,7 +393,7 @@ void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause, */ /* get local capabilities */ - lcl_adv = ethtool_adv_to_lcl_adv_t(phy_dev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising); /* get link partner capabilities */ rmt_adv = 0; diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c index 82722d05fedb..88a396fd242f 100644 --- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c +++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c @@ -473,7 +473,7 @@ static int fsl_pq_mdio_probe(struct platform_device *pdev) if (data->get_tbipa) { for_each_child_of_node(np, tbi) { - if (strcmp(tbi->type, "tbi-phy") == 0) { + if (of_node_is_type(tbi, "tbi-phy")) { dev_dbg(&pdev->dev, "found TBI PHY node %pOFP\n", tbi); break; diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 3c8da1a18ba0..45fcc96be90e 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -500,6 +500,7 @@ static const struct net_device_ops gfar_netdev_ops = { .ndo_tx_timeout = gfar_timeout, .ndo_do_ioctl = gfar_ioctl, .ndo_get_stats = gfar_get_stats, + .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = gfar_set_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER @@ -720,7 +721,7 @@ static int gfar_of_group_count(struct device_node *np) int num = 0; for_each_available_child_of_node(np, child) - if (!of_node_cmp(child->name, "queue-group")) + if (of_node_name_eq(child, "queue-group")) num++; return num; @@ -838,7 +839,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) /* Parse and initialize group specific information */ if (priv->mode == MQ_MG_MODE) { for_each_available_child_of_node(np, child) { - if (of_node_cmp(child->name, "queue-group")) + if (!of_node_name_eq(child, "queue-group")) continue; err = gfar_parse_group(child, priv, model); @@ -1784,14 +1785,20 @@ static phy_interface_t gfar_get_interface(struct net_device *dev) */ static int init_phy(struct net_device *dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct gfar_private *priv = netdev_priv(dev); - uint gigabit_support = - priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT ? 
- GFAR_SUPPORTED_GBIT : 0; phy_interface_t interface; struct phy_device *phydev; struct ethtool_eee edata; + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_MII_BIT, mask); + if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, mask); + priv->oldlink = 0; priv->oldspeed = 0; priv->oldduplex = -1; @@ -1809,8 +1816,8 @@ static int init_phy(struct net_device *dev) gfar_configure_serdes(dev); /* Remove any features not supported by the controller */ - phydev->supported &= (GFAR_SUPPORTED | gigabit_support); - phydev->advertising = phydev->supported; + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); /* Add support for flow control */ phy_support_asym_pause(phydev); @@ -3656,7 +3663,7 @@ static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) val |= MACCFG1_TX_FLOW; diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c index 0d76e15cd6dd..241325c35cb4 100644 --- a/drivers/net/ethernet/freescale/gianfar_ethtool.c +++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c @@ -1134,11 +1134,9 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule, prio = vlan_tci_prio(rule); prio_mask = vlan_tci_priom(rule); - if (cfi == VLAN_TAG_PRESENT && cfi_mask == VLAN_TAG_PRESENT) { - vlan |= RQFPR_CFI; - vlan_mask |= RQFPR_CFI; - } else if (cfi != VLAN_TAG_PRESENT && - cfi_mask == VLAN_TAG_PRESENT) { + if (cfi_mask) { + if (cfi) + vlan |= RQFPR_CFI; vlan_mask |= RQFPR_CFI; } } diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c index 32e02700feaa..c3d539e209ed 100644 --- a/drivers/net/ethernet/freescale/ucc_geth.c +++ b/drivers/net/ethernet/freescale/ucc_geth.c @@ -30,6 +30,7 @@ #include <linux/dma-mapping.h> #include <linux/mii.h> #include <linux/phy.h> +#include <linux/phy_fixed.h> #include <linux/workqueue.h> #include <linux/of_address.h> #include <linux/of_irq.h> @@ -1742,12 +1743,7 @@ static int init_phy(struct net_device *dev) if (priv->phy_interface == PHY_INTERFACE_MODE_SGMII) uec_configure_serdes(dev); - phy_set_max_speed(phydev, SPEED_100); - - if (priv->max_speed == SPEED_1000) - phydev->supported |= ADVERTISED_1000baseT_Full; - - phydev->advertising = phydev->supported; + phy_set_max_speed(phydev, priv->max_speed); priv->phydev = phydev; @@ -3681,6 +3677,7 @@ static const struct net_device_ops ucc_geth_netdev_ops = { .ndo_stop = ucc_geth_close, .ndo_start_xmit = ucc_geth_start_xmit, .ndo_validate_addr = eth_validate_addr, + .ndo_change_carrier = fixed_phy_change_carrier, .ndo_set_mac_address = ucc_geth_set_mac_addr, .ndo_set_rx_mode = ucc_geth_set_multi, .ndo_tx_timeout = ucc_geth_timeout, diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig index 25152715396b..fee4664c9189 100644 --- a/drivers/net/ethernet/hisilicon/Kconfig +++ b/drivers/net/ethernet/hisilicon/Kconfig @@ -118,6 +118,7 @@ config HNS3_ENET tristate "Hisilicon HNS3 Ethernet Device Support" default m depends on 64BIT && PCI + depends on INET ---help--- This 
selects the Ethernet Driver for Hisilicon Network Subsystem 3 for hip08 family of SoCs. This module depends upon HNAE3 driver to access the HNAE3 diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c index 6242249c9f4c..5748d3f722f6 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c @@ -1163,6 +1163,7 @@ static void hns_nic_adjust_link(struct net_device *ndev) */ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; struct phy_device *phy_dev = h->phy_dev; int ret; @@ -1180,8 +1181,9 @@ int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h) if (unlikely(ret)) return -ENODEV; - phy_dev->supported &= h->if_support; - phy_dev->advertising = phy_dev->supported; + ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support); + linkmode_and(phy_dev->supported, phy_dev->supported, supported); + linkmode_copy(phy_dev->advertising, phy_dev->supported); if (h->phy_if == PHY_INTERFACE_MODE_XGMII) phy_dev->autoneg = false; diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 774beda040a1..8e9b95871d30 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -624,7 +624,7 @@ static void hns_nic_self_test(struct net_device *ndev, clear_bit(NIC_STATE_TESTING, &priv->state); if (if_running) - (void)dev_open(ndev); + (void)dev_open(ndev, NULL); } /* Online tests aren't run; pass by default */ diff --git a/drivers/net/ethernet/hisilicon/hns3/Makefile b/drivers/net/ethernet/hisilicon/hns3/Makefile index 002534f12b66..d01bf536eb86 100644 --- a/drivers/net/ethernet/hisilicon/hns3/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/Makefile @@ -9,6 +9,6 @@ obj-$(CONFIG_HNS3) += hns3vf/ obj-$(CONFIG_HNS3) += hnae3.o obj-$(CONFIG_HNS3_ENET) += hns3.o -hns3-objs = hns3_enet.o hns3_ethtool.o +hns3-objs = hns3_enet.o hns3_ethtool.o hns3_debugfs.o hns3-$(CONFIG_HNS3_DCB) += hns3_dcbnl.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 038326cfda93..691d12174902 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -36,6 +36,10 @@ enum HCLGE_MBX_OPCODE { HCLGE_MBX_BIND_FUNC_QUEUE, /* (VF -> PF) bind function and queue */ HCLGE_MBX_GET_LINK_STATUS, /* (VF -> PF) get link status */ HCLGE_MBX_QUEUE_RESET, /* (VF -> PF) reset queue */ + HCLGE_MBX_KEEP_ALIVE, /* (VF -> PF) send keep alive cmd */ + HCLGE_MBX_SET_ALIVE, /* (VF -> PF) set alive state */ + HCLGE_MBX_SET_MTU, /* (VF -> PF) set mtu */ + HCLGE_MBX_GET_QID_IN_PF, /* (VF -> PF) get queue id in pf */ }; /* below are per-VF mac-vlan subcodes */ @@ -85,6 +89,12 @@ struct hclge_mbx_pf_to_vf_cmd { u16 msg[8]; }; +struct hclge_vf_rst_cmd { + u8 dest_vfid; + u8 vf_rst; + u8 rsv[22]; +}; + /* used by VF to store the received Async responses from PF */ struct hclgevf_mbx_arq_ring { #define HCLGE_MBX_MAX_ARQ_MSG_SIZE 8 diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 055b40606dbc..36eab37d8a40 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -52,6 +52,7 @@ #define HNAE3_UNIC_CLIENT_INITED_B 0x4 #define HNAE3_ROCE_CLIENT_INITED_B 0x5 #define HNAE3_DEV_SUPPORT_FD_B 0x6 +#define HNAE3_DEV_SUPPORT_GRO_B 
0x7 #define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\ BIT(HNAE3_DEV_SUPPORT_ROCE_B)) @@ -65,6 +66,9 @@ #define hnae3_dev_fd_supported(hdev) \ hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B) +#define hnae3_dev_gro_supported(hdev) \ + hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B) + #define ring_ptr_move_fw(ring, p) \ ((ring)->p = ((ring)->p + 1) % (ring)->desc_num) #define ring_ptr_move_bw(ring, p) \ @@ -124,14 +128,23 @@ enum hnae3_reset_notify_type { enum hnae3_reset_type { HNAE3_VF_RESET, + HNAE3_VF_FUNC_RESET, + HNAE3_VF_PF_FUNC_RESET, HNAE3_VF_FULL_RESET, + HNAE3_FLR_RESET, HNAE3_FUNC_RESET, HNAE3_CORE_RESET, HNAE3_GLOBAL_RESET, HNAE3_IMP_RESET, + HNAE3_UNKNOWN_RESET, HNAE3_NONE_RESET, }; +enum hnae3_flr_state { + HNAE3_FLR_DOWN, + HNAE3_FLR_DONE, +}; + struct hnae3_vector_info { u8 __iomem *io_addr; int vector; @@ -162,6 +175,7 @@ struct hnae3_client_ops { int (*setup_tc)(struct hnae3_handle *handle, u8 tc); int (*reset_notify)(struct hnae3_handle *handle, enum hnae3_reset_notify_type type); + enum hnae3_reset_type (*process_hw_error)(struct hnae3_handle *handle); }; #define HNAE3_CLIENT_NAME_LENGTH 16 @@ -197,6 +211,10 @@ struct hnae3_ae_dev { * Enable the hardware * stop() * Disable the hardware + * start_client() + * Inform the hclge that client has been started + * stop_client() + * Inform the hclge that client has been stopped * get_status() * Get the carrier state of the back channel of the handle, 1 for ok, 0 for * non-ok @@ -292,17 +310,22 @@ struct hnae3_ae_dev { * Set vlan filter config of vf * enable_hw_strip_rxvtag() * Enable/disable hardware strip vlan tag of packets received + * set_gro_en + * Enable/disable HW GRO */ struct hnae3_ae_ops { int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev); void (*uninit_ae_dev)(struct hnae3_ae_dev *ae_dev); - + void (*flr_prepare)(struct hnae3_ae_dev *ae_dev); + void (*flr_done)(struct hnae3_ae_dev *ae_dev); int (*init_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); void (*uninit_client_instance)(struct hnae3_client *client, struct hnae3_ae_dev *ae_dev); int (*start)(struct hnae3_handle *handle); void (*stop)(struct hnae3_handle *handle); + int (*client_start)(struct hnae3_handle *handle); + void (*client_stop)(struct hnae3_handle *handle); int (*get_status)(struct hnae3_handle *handle); void (*get_ksettings_an_result)(struct hnae3_handle *handle, u8 *auto_neg, u32 *speed, u8 *duplex); @@ -403,6 +426,8 @@ struct hnae3_ae_ops { u16 vlan, u8 qos, __be16 proto); int (*enable_hw_strip_rxvtag)(struct hnae3_handle *handle, bool enable); void (*reset_event)(struct pci_dev *pdev, struct hnae3_handle *handle); + void (*set_default_reset_request)(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type); void (*get_channels)(struct hnae3_handle *handle, struct ethtool_channels *ch); void (*get_tqps_and_rss_info)(struct hnae3_handle *h, @@ -429,7 +454,14 @@ struct hnae3_ae_ops { struct ethtool_rxnfc *cmd, u32 *rule_locs); int (*restore_fd_rules)(struct hnae3_handle *handle); void (*enable_fd)(struct hnae3_handle *handle, bool enable); - pci_ers_result_t (*process_hw_error)(struct hnae3_ae_dev *ae_dev); + int (*dbg_run_cmd)(struct hnae3_handle *handle, char *cmd_buf); + pci_ers_result_t (*handle_hw_ras_error)(struct hnae3_ae_dev *ae_dev); + bool (*get_hw_reset_stat)(struct hnae3_handle *handle); + bool (*ae_dev_resetting)(struct hnae3_handle *handle); + unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle); + int (*set_gro_en)(struct hnae3_handle *handle, int 
enable); + u16 (*get_global_queue_id)(struct hnae3_handle *handle, u16 queue_id); + void (*set_timer_task)(struct hnae3_handle *handle, bool enable); }; struct hnae3_dcb_ops { @@ -488,6 +520,14 @@ struct hnae3_roce_private_info { void __iomem *roce_io_base; int base_vector; int num_vectors; + + /* The below attributes defined for RoCE client, hnae3 gives + * initial values to them, and RoCE client can modify and use + * them. + */ + unsigned long reset_state; + unsigned long instance_state; + unsigned long state; }; struct hnae3_unic_private_info { @@ -520,9 +560,6 @@ struct hnae3_handle { struct hnae3_ae_algo *ae_algo; /* the class who provides this handle */ u64 flags; /* Indicate the capabilities for this handle*/ - unsigned long last_reset_time; - enum hnae3_reset_type reset_level; - union { struct net_device *netdev; /* first member */ struct hnae3_knic_private_info kinfo; @@ -533,6 +570,7 @@ struct hnae3_handle { u32 numa_node_mask; /* for multi-chip support */ u8 netdev_flags; + struct dentry *hnae3_dbgfs; }; #define hnae3_set_field(origin, mask, shift, val) \ diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c index ea5f8a84070d..b6fabbbdfd5b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_dcbnl.c @@ -9,6 +9,9 @@ int hns3_dcbnl_ieee_getets(struct net_device *ndev, struct ieee_ets *ets) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_getets) return h->kinfo.dcb_ops->ieee_getets(h, ets); @@ -20,6 +23,9 @@ int hns3_dcbnl_ieee_setets(struct net_device *ndev, struct ieee_ets *ets) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_setets) return h->kinfo.dcb_ops->ieee_setets(h, ets); @@ -31,6 +37,9 @@ int hns3_dcbnl_ieee_getpfc(struct net_device *ndev, struct ieee_pfc *pfc) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_getpfc) return h->kinfo.dcb_ops->ieee_getpfc(h, pfc); @@ -42,6 +51,9 @@ int hns3_dcbnl_ieee_setpfc(struct net_device *ndev, struct ieee_pfc *pfc) { struct hnae3_handle *h = hns3_get_handle(ndev); + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (h->kinfo.dcb_ops->ieee_setpfc) return h->kinfo.dcb_ops->ieee_setpfc(h, pfc); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c new file mode 100644 index 000000000000..0de543faa5b1 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_debugfs.c @@ -0,0 +1,399 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
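A minimal sketch of the guard pattern the hns3 dcbnl hunks above introduce: each externally reachable callback first tests a "resetting" state bit and returns -EBUSY, so it cannot race with reset teardown. The struct, bit name, and function below are illustrative stand-ins, not the driver's own symbols.

#include <linux/bitops.h>
#include <linux/errno.h>

/* illustrative state bitmap; the real driver keeps this in its priv */
struct example_priv {
	unsigned long state;
};

#define EXAMPLE_STATE_RESETTING	0	/* assumed bit index */

static int example_dcbnl_op(struct example_priv *priv)
{
	/* reject the request while a reset is in flight */
	if (test_bit(EXAMPLE_STATE_RESETTING, &priv->state))
		return -EBUSY;

	/* ... normal dcbnl handling would follow here ... */
	return 0;
}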
*/ + +#include <linux/debugfs.h> +#include <linux/device.h> + +#include "hnae3.h" +#include "hns3_enet.h" + +#define HNS3_DBG_READ_LEN 256 + +static struct dentry *hns3_dbgfs_root; + +static int hns3_dbg_queue_info(struct hnae3_handle *h, char *cmd_buf) +{ + struct hns3_nic_priv *priv = h->priv; + struct hns3_nic_ring_data *ring_data; + struct hns3_enet_ring *ring; + u32 base_add_l, base_add_h; + u32 queue_num, queue_max; + u32 value, i = 0; + int cnt; + + if (!priv->ring_data) { + dev_err(&h->pdev->dev, "ring_data is NULL\n"); + return -EFAULT; + } + + queue_max = h->kinfo.num_tqps; + cnt = kstrtouint(&cmd_buf[11], 0, &queue_num); + if (cnt) + queue_num = 0; + else + queue_max = queue_num + 1; + + dev_info(&h->pdev->dev, "queue info\n"); + + if (queue_num >= h->kinfo.num_tqps) { + dev_err(&h->pdev->dev, + "Queue number(%u) is out of range(%u)\n", queue_num, + h->kinfo.num_tqps - 1); + return -EINVAL; + } + + ring_data = priv->ring_data; + for (i = queue_num; i < queue_max; i++) { + /* Each cycle needs to determine whether the instance is reset, + * to prevent reference to invalid memory. And need to ensure + * that the following code is executed within 100ms. + */ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return -EPERM; + + ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring; + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BASEADDR_L_REG); + dev_info(&h->pdev->dev, "RX(%d) BASE ADD: 0x%08x%08x\n", i, + base_add_h, base_add_l); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_NUM_REG); + dev_info(&h->pdev->dev, "RX(%d) RING BD NUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_BD_LEN_REG); + dev_info(&h->pdev->dev, "RX(%d) RING BD LEN: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_TAIL_REG); + dev_info(&h->pdev->dev, "RX(%d) RING TAIL: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_HEAD_REG); + dev_info(&h->pdev->dev, "RX(%d) RING HEAD: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_FBDNUM_REG); + dev_info(&h->pdev->dev, "RX(%d) RING FBDNUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_RX_RING_PKTNUM_RECORD_REG); + dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value); + + ring = ring_data[i].ring; + base_add_h = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_H_REG); + base_add_l = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BASEADDR_L_REG); + dev_info(&h->pdev->dev, "TX(%d) BASE ADD: 0x%08x%08x\n", i, + base_add_h, base_add_l); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_BD_NUM_REG); + dev_info(&h->pdev->dev, "TX(%d) RING BD NUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TC_REG); + dev_info(&h->pdev->dev, "TX(%d) RING TC: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_TAIL_REG); + dev_info(&h->pdev->dev, "TX(%d) RING TAIL: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_HEAD_REG); + dev_info(&h->pdev->dev, "TX(%d) RING HEAD: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_FBDNUM_REG); + dev_info(&h->pdev->dev, "TX(%d) RING FBDNUM: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + 
HNS3_RING_TX_RING_OFFSET_REG); + dev_info(&h->pdev->dev, "TX(%d) RING OFFSET: %u\n", i, value); + + value = readl_relaxed(ring->tqp->io_base + + HNS3_RING_TX_RING_PKTNUM_RECORD_REG); + dev_info(&h->pdev->dev, "TX(%d) RING PKTNUM: %u\n\n", i, + value); + } + + return 0; +} + +static int hns3_dbg_queue_map(struct hnae3_handle *h) +{ + struct hns3_nic_priv *priv = h->priv; + struct hns3_nic_ring_data *ring_data; + int i; + + if (!h->ae_algo->ops->get_global_queue_id) + return -EOPNOTSUPP; + + dev_info(&h->pdev->dev, "map info for queue id and vector id\n"); + dev_info(&h->pdev->dev, + "local queue id | global queue id | vector id\n"); + for (i = 0; i < h->kinfo.num_tqps; i++) { + u16 global_qid; + + global_qid = h->ae_algo->ops->get_global_queue_id(h, i); + ring_data = &priv->ring_data[i]; + if (!ring_data || !ring_data->ring || + !ring_data->ring->tqp_vector) + continue; + + dev_info(&h->pdev->dev, + " %4d %4d %4d\n", + i, global_qid, + ring_data->ring->tqp_vector->vector_irq); + } + + return 0; +} + +static int hns3_dbg_bd_info(struct hnae3_handle *h, char *cmd_buf) +{ + struct hns3_nic_priv *priv = h->priv; + struct hns3_nic_ring_data *ring_data; + struct hns3_desc *rx_desc, *tx_desc; + struct device *dev = &h->pdev->dev; + struct hns3_enet_ring *ring; + u32 tx_index, rx_index; + u32 q_num, value; + int cnt; + + cnt = sscanf(&cmd_buf[8], "%u %u", &q_num, &tx_index); + if (cnt == 2) { + rx_index = tx_index; + } else if (cnt != 1) { + dev_err(dev, "bd info: bad command string, cnt=%d\n", cnt); + return -EINVAL; + } + + if (q_num >= h->kinfo.num_tqps) { + dev_err(dev, "Queue number(%u) is out of range(%u)\n", q_num, + h->kinfo.num_tqps - 1); + return -EINVAL; + } + + ring_data = priv->ring_data; + ring = ring_data[q_num].ring; + value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); + tx_index = (cnt == 1) ? value : tx_index; + + if (tx_index >= ring->desc_num) { + dev_err(dev, "bd index (%u) is out of range(%u)\n", tx_index, + ring->desc_num - 1); + return -EINVAL; + } + + tx_desc = &ring->desc[tx_index]; + dev_info(dev, "TX Queue Num: %u, BD Index: %u\n", q_num, tx_index); + dev_info(dev, "(TX) addr: 0x%llx\n", tx_desc->addr); + dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.vlan_tag); + dev_info(dev, "(TX)send_size: %u\n", tx_desc->tx.send_size); + dev_info(dev, "(TX)vlan_tso: %u\n", tx_desc->tx.type_cs_vlan_tso); + dev_info(dev, "(TX)l2_len: %u\n", tx_desc->tx.l2_len); + dev_info(dev, "(TX)l3_len: %u\n", tx_desc->tx.l3_len); + dev_info(dev, "(TX)l4_len: %u\n", tx_desc->tx.l4_len); + dev_info(dev, "(TX)vlan_tag: %u\n", tx_desc->tx.outer_vlan_tag); + dev_info(dev, "(TX)tv: %u\n", tx_desc->tx.tv); + dev_info(dev, "(TX)vlan_msec: %u\n", tx_desc->tx.ol_type_vlan_msec); + dev_info(dev, "(TX)ol2_len: %u\n", tx_desc->tx.ol2_len); + dev_info(dev, "(TX)ol3_len: %u\n", tx_desc->tx.ol3_len); + dev_info(dev, "(TX)ol4_len: %u\n", tx_desc->tx.ol4_len); + dev_info(dev, "(TX)paylen: %u\n", tx_desc->tx.paylen); + dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri); + dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss); + + ring = ring_data[q_num + h->kinfo.num_tqps].ring; + value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG); + rx_index = (cnt == 1) ? 
value : tx_index; + rx_desc = &ring->desc[rx_index]; + + dev_info(dev, "RX Queue Num: %u, BD Index: %u\n", q_num, rx_index); + dev_info(dev, "(RX)addr: 0x%llx\n", rx_desc->addr); + dev_info(dev, "(RX)pkt_len: %u\n", rx_desc->rx.pkt_len); + dev_info(dev, "(RX)size: %u\n", rx_desc->rx.size); + dev_info(dev, "(RX)rss_hash: %u\n", rx_desc->rx.rss_hash); + dev_info(dev, "(RX)fd_id: %u\n", rx_desc->rx.fd_id); + dev_info(dev, "(RX)vlan_tag: %u\n", rx_desc->rx.vlan_tag); + dev_info(dev, "(RX)o_dm_vlan_id_fb: %u\n", rx_desc->rx.o_dm_vlan_id_fb); + dev_info(dev, "(RX)ot_vlan_tag: %u\n", rx_desc->rx.ot_vlan_tag); + dev_info(dev, "(RX)bd_base_info: %u\n", rx_desc->rx.bd_base_info); + + return 0; +} + +static void hns3_dbg_help(struct hnae3_handle *h) +{ +#define HNS3_DBG_BUF_LEN 256 + + char printf_buf[HNS3_DBG_BUF_LEN]; + + dev_info(&h->pdev->dev, "available commands\n"); + dev_info(&h->pdev->dev, "queue info [number]\n"); + dev_info(&h->pdev->dev, "queue map\n"); + dev_info(&h->pdev->dev, "bd info [q_num] <bd index>\n"); + dev_info(&h->pdev->dev, "dump fd tcam\n"); + dev_info(&h->pdev->dev, "dump tc\n"); + dev_info(&h->pdev->dev, "dump tm map [q_num]\n"); + dev_info(&h->pdev->dev, "dump tm\n"); + dev_info(&h->pdev->dev, "dump qos pause cfg\n"); + dev_info(&h->pdev->dev, "dump qos pri map\n"); + dev_info(&h->pdev->dev, "dump qos buf cfg\n"); + dev_info(&h->pdev->dev, "dump mng tbl\n"); + + memset(printf_buf, 0, HNS3_DBG_BUF_LEN); + strncat(printf_buf, "dump reg [[bios common] [ssu <prt_id>]", + HNS3_DBG_BUF_LEN - 1); + strncat(printf_buf + strlen(printf_buf), + " [igu egu <prt_id>] [rpu <tc_queue_num>]", + HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); + strncat(printf_buf + strlen(printf_buf), + " [rtc] [ppp] [rcb] [tqp <q_num>]]\n", + HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); + dev_info(&h->pdev->dev, "%s", printf_buf); + + memset(printf_buf, 0, HNS3_DBG_BUF_LEN); + strncat(printf_buf, "dump reg dcb [port_id] [pri_id] [pg_id]", + HNS3_DBG_BUF_LEN - 1); + strncat(printf_buf + strlen(printf_buf), " [rq_id] [nq_id] [qset_id]\n", + HNS3_DBG_BUF_LEN - strlen(printf_buf) - 1); + dev_info(&h->pdev->dev, "%s", printf_buf); +} + +static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer, + size_t count, loff_t *ppos) +{ + int uncopy_bytes; + char *buf; + int len; + + if (*ppos != 0) + return 0; + + if (count < HNS3_DBG_READ_LEN) + return -ENOSPC; + + buf = kzalloc(HNS3_DBG_READ_LEN, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + len = snprintf(buf, HNS3_DBG_READ_LEN, "%s\n", + "Please echo help to cmd to get help information"); + uncopy_bytes = copy_to_user(buffer, buf, len); + + kfree(buf); + + if (uncopy_bytes) + return -EFAULT; + + return (*ppos = len); +} + +static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer, + size_t count, loff_t *ppos) +{ + struct hnae3_handle *handle = filp->private_data; + struct hns3_nic_priv *priv = handle->priv; + char *cmd_buf, *cmd_buf_tmp; + int uncopied_bytes; + int ret = 0; + + if (*ppos != 0) + return 0; + + /* Judge if the instance is being reset. 
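hns3_dbg_cmd_read above is a one-shot debugfs read: any non-zero offset reports EOF, the reply is built in a kernel buffer, and copy_to_user() hands it out before *ppos is advanced. A condensed sketch of the same shape, with a hypothetical function name and an arbitrary buffer size:

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t example_dbg_read(struct file *filp, char __user *buffer,
				size_t count, loff_t *ppos)
{
	char buf[64];
	int len;

	if (*ppos != 0)			/* only offset 0 is served */
		return 0;

	len = scnprintf(buf, sizeof(buf),
			"echo help to cmd to get help information\n");
	if (count < len)
		return -ENOSPC;

	if (copy_to_user(buffer, buf, len))
		return -EFAULT;

	*ppos = len;			/* subsequent reads see EOF */
	return len;
}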
*/ + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state) || + test_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return 0; + + cmd_buf = kzalloc(count + 1, GFP_KERNEL); + if (!cmd_buf) + return count; + + uncopied_bytes = copy_from_user(cmd_buf, buffer, count); + if (uncopied_bytes) { + kfree(cmd_buf); + return -EFAULT; + } + + cmd_buf[count] = '\0'; + + cmd_buf_tmp = strchr(cmd_buf, '\n'); + if (cmd_buf_tmp) { + *cmd_buf_tmp = '\0'; + count = cmd_buf_tmp - cmd_buf + 1; + } + + if (strncmp(cmd_buf, "help", 4) == 0) + hns3_dbg_help(handle); + else if (strncmp(cmd_buf, "queue info", 10) == 0) + ret = hns3_dbg_queue_info(handle, cmd_buf); + else if (strncmp(cmd_buf, "queue map", 9) == 0) + ret = hns3_dbg_queue_map(handle); + else if (strncmp(cmd_buf, "bd info", 7) == 0) + ret = hns3_dbg_bd_info(handle, cmd_buf); + else if (handle->ae_algo->ops->dbg_run_cmd) + ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf); + + if (ret) + hns3_dbg_help(handle); + + kfree(cmd_buf); + cmd_buf = NULL; + + return count; +} + +static const struct file_operations hns3_dbg_cmd_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = hns3_dbg_cmd_read, + .write = hns3_dbg_cmd_write, +}; + +void hns3_dbg_init(struct hnae3_handle *handle) +{ + const char *name = pci_name(handle->pdev); + struct dentry *pfile; + + handle->hnae3_dbgfs = debugfs_create_dir(name, hns3_dbgfs_root); + if (!handle->hnae3_dbgfs) + return; + + pfile = debugfs_create_file("cmd", 0600, handle->hnae3_dbgfs, handle, + &hns3_dbg_cmd_fops); + if (!pfile) { + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; + dev_warn(&handle->pdev->dev, "create file for %s fail\n", + name); + } +} + +void hns3_dbg_uninit(struct hnae3_handle *handle) +{ + debugfs_remove_recursive(handle->hnae3_dbgfs); + handle->hnae3_dbgfs = NULL; +} + +void hns3_dbg_register_debugfs(const char *debugfs_dir_name) +{ + hns3_dbgfs_root = debugfs_create_dir(debugfs_dir_name, NULL); + if (!hns3_dbgfs_root) { + pr_warn("Register debugfs for %s fail\n", debugfs_dir_name); + return; + } +} + +void hns3_dbg_unregister_debugfs(void) +{ + debugfs_remove_recursive(hns3_dbgfs_root); + hns3_dbgfs_root = NULL; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 20fcf0d1c2ce..d3b9aaf96c1c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -15,6 +15,7 @@ #include <linux/vermagic.h> #include <net/gre.h> #include <net/pkt_cls.h> +#include <net/tcp.h> #include <net/vxlan.h> #include "hnae3.h" @@ -239,7 +240,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector, tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K; tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K; - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW; tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW; } @@ -312,6 +312,24 @@ static u16 hns3_get_max_available_channels(struct hnae3_handle *h) return min_t(u16, rss_size, max_rss_size); } +static void hns3_tqp_enable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg |= BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + +static void hns3_tqp_disable(struct hnae3_queue *tqp) +{ + u32 rcb_reg; + + rcb_reg = hns3_read_dev(tqp, HNS3_RING_EN_REG); + rcb_reg &= ~BIT(HNS3_RING_EN_B); + hns3_write_dev(tqp, HNS3_RING_EN_REG, rcb_reg); +} + static int 
hns3_nic_net_up(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); @@ -334,6 +352,10 @@ static int hns3_nic_net_up(struct net_device *netdev) for (i = 0; i < priv->vector_num; i++) hns3_vector_enable(&priv->tqp_vector[i]); + /* enable rcb */ + for (j = 0; j < h->kinfo.num_tqps; j++) + hns3_tqp_enable(h->kinfo.tqp[j]); + /* start the ae_dev */ ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; if (ret) @@ -344,6 +366,9 @@ static int hns3_nic_net_up(struct net_device *netdev) return 0; out_start_err: + while (j--) + hns3_tqp_disable(h->kinfo.tqp[j]); + for (j = i - 1; j >= 0; j--) hns3_vector_disable(&priv->tqp_vector[j]); @@ -359,6 +384,9 @@ static int hns3_nic_net_open(struct net_device *netdev) struct hnae3_knic_private_info *kinfo; int i, ret; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + netif_carrier_off(netdev); ret = hns3_nic_set_real_num_queue(netdev); @@ -378,23 +406,27 @@ static int hns3_nic_net_open(struct net_device *netdev) kinfo->prio_tc[i]); } - priv->ae_handle->last_reset_time = jiffies; + if (h->ae_algo->ops->set_timer_task) + h->ae_algo->ops->set_timer_task(priv->ae_handle, true); + return 0; } static void hns3_nic_net_down(struct net_device *netdev) { struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); const struct hnae3_ae_ops *ops; int i; - if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) - return; - /* disable vectors */ for (i = 0; i < priv->vector_num; i++) hns3_vector_disable(&priv->tqp_vector[i]); + /* disable rcb */ + for (i = 0; i < h->kinfo.num_tqps; i++) + hns3_tqp_disable(h->kinfo.tqp[i]); + /* stop ae_dev */ ops = priv->ae_handle->ae_algo->ops; if (ops->stop) @@ -408,6 +440,15 @@ static void hns3_nic_net_down(struct net_device *netdev) static int hns3_nic_net_stop(struct net_device *netdev) { + struct hns3_nic_priv *priv = netdev_priv(netdev); + struct hnae3_handle *h = hns3_get_handle(netdev); + + if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) + return 0; + + if (h->ae_algo->ops->set_timer_task) + h->ae_algo->ops->set_timer_task(priv->ae_handle, false); + netif_tx_stop_all_queues(netdev); netif_carrier_off(netdev); @@ -1312,6 +1353,15 @@ static int hns3_nic_set_features(struct net_device *netdev, priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; } + if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { + if (features & NETIF_F_GRO_HW) + ret = h->ae_algo->ops->set_gro_en(h, true); + else + ret = h->ae_algo->ops->set_gro_en(h, false); + if (ret) + return ret; + } + if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) && h->ae_algo->ops->enable_vlan_filter) { if (features & NETIF_F_HW_VLAN_CTAG_FILTER) @@ -1530,18 +1580,11 @@ static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) { struct hnae3_handle *h = hns3_get_handle(netdev); - bool if_running = netif_running(netdev); int ret; if (!h->ae_algo->ops->set_mtu) return -EOPNOTSUPP; - /* if this was called with netdev up then bring netdevice down */ - if (if_running) { - (void)hns3_nic_net_stop(netdev); - msleep(100); - } - ret = h->ae_algo->ops->set_mtu(h, new_mtu); if (ret) netdev_err(netdev, "failed to change MTU in hardware %d\n", @@ -1549,10 +1592,6 @@ static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu) else netdev->mtu = new_mtu; - /* if the netdev was running earlier, bring it up again */ - if (if_running && hns3_nic_net_open(netdev)) - ret = -EINVAL; - return ret; } @@ -1615,10 
+1654,9 @@ static void hns3_nic_net_timeout(struct net_device *ndev) priv->tx_timeout_count++; - if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo))) - return; - - /* request the reset */ + /* request the reset, and let the hclge to determine + * which reset level should be done + */ if (h->ae_algo->ops->reset_event) h->ae_algo->ops->reset_event(h->pdev, h); } @@ -1682,8 +1720,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev) static void hns3_get_dev_capability(struct pci_dev *pdev, struct hnae3_ae_dev *ae_dev) { - if (pdev->revision >= 0x21) + if (pdev->revision >= 0x21) { hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1); + hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1); + } } /* hns3_probe - Device initialization routine @@ -1795,8 +1835,8 @@ static pci_ers_result_t hns3_error_detected(struct pci_dev *pdev, return PCI_ERS_RESULT_NONE; } - if (ae_dev->ops->process_hw_error) - ret = ae_dev->ops->process_hw_error(ae_dev); + if (ae_dev->ops->handle_hw_ras_error) + ret = ae_dev->ops->handle_hw_ras_error(ae_dev); else return PCI_ERS_RESULT_NONE; @@ -1819,9 +1859,29 @@ static pci_ers_result_t hns3_slot_reset(struct pci_dev *pdev) return PCI_ERS_RESULT_DISCONNECT; } +static void hns3_reset_prepare(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "hns3 flr prepare\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_prepare) + ae_dev->ops->flr_prepare(ae_dev); +} + +static void hns3_reset_done(struct pci_dev *pdev) +{ + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + + dev_info(&pdev->dev, "hns3 flr done\n"); + if (ae_dev && ae_dev->ops && ae_dev->ops->flr_done) + ae_dev->ops->flr_done(ae_dev); +} + static const struct pci_error_handlers hns3_err_handler = { .error_detected = hns3_error_detected, .slot_reset = hns3_slot_reset, + .reset_prepare = hns3_reset_prepare, + .reset_done = hns3_reset_done, }; static struct pci_driver hns3_driver = { @@ -1875,7 +1935,9 @@ static void hns3_set_default_feature(struct net_device *netdev) NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC; if (pdev->revision >= 0x21) { - netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_GRO_HW; + netdev->features |= NETIF_F_GRO_HW; if (!(h->flags & HNAE3_SUPPORT_VF)) { netdev->hw_features |= NETIF_F_NTUPLE; @@ -2253,6 +2315,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, if (!(netdev->features & NETIF_F_RXCSUM)) return; + /* We MUST enable hardware checksum before enabling hardware GRO */ + if (skb_shinfo(skb)->gso_size) { + skb->ip_summed = CHECKSUM_UNNECESSARY; + return; + } + /* check if hardware has done checksum */ if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B)) return; @@ -2296,6 +2364,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb, static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb) { + if (skb_has_frag_list(skb)) + napi_gro_flush(&ring->tqp_vector->napi, false); + napi_gro_receive(&ring->tqp_vector->napi, skb); } @@ -2329,12 +2400,166 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring, } } +static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length, + unsigned char *va) +{ +#define HNS3_NEED_ADD_FRAG 1 + struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; + struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct sk_buff *skb; + + ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); + skb = 
ring->skb; + if (unlikely(!skb)) { + netdev_err(netdev, "alloc rx skb fail\n"); + + u64_stats_update_begin(&ring->syncp); + ring->stats.sw_err_cnt++; + u64_stats_update_end(&ring->syncp); + + return -ENOMEM; + } + + prefetchw(skb->data); + + ring->pending_buf = 1; + ring->frag_num = 0; + ring->tail_skb = NULL; + if (length <= HNS3_RX_HEAD_SIZE) { + memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + + /* We can reuse buffer as-is, just make sure it is local */ + if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) + desc_cb->reuse_flag = 1; + else /* This page cannot be reused so discard it */ + put_page(desc_cb->priv); + + ring_ptr_move_fw(ring, next_to_clean); + return 0; + } + u64_stats_update_begin(&ring->syncp); + ring->stats.seg_pkt_cnt++; + u64_stats_update_end(&ring->syncp); + + ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); + __skb_put(skb, ring->pull_len); + hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, + desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + + return HNS3_NEED_ADD_FRAG; +} + +static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc, + struct sk_buff **out_skb, bool pending) +{ + struct sk_buff *skb = *out_skb; + struct sk_buff *head_skb = *out_skb; + struct sk_buff *new_skb; + struct hns3_desc_cb *desc_cb; + struct hns3_desc *pre_desc; + u32 bd_base_info; + int pre_bd; + + /* if there is pending bd, the SW param next_to_clean has moved + * to next and the next is NULL + */ + if (pending) { + pre_bd = (ring->next_to_clean - 1 + ring->desc_num) % + ring->desc_num; + pre_desc = &ring->desc[pre_bd]; + bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info); + } else { + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + } + + while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { + desc = &ring->desc[ring->next_to_clean]; + desc_cb = &ring->desc_cb[ring->next_to_clean]; + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); + if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)) + return -ENXIO; + + if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { + new_skb = napi_alloc_skb(&ring->tqp_vector->napi, + HNS3_RX_HEAD_SIZE); + if (unlikely(!new_skb)) { + netdev_err(ring->tqp->handle->kinfo.netdev, + "alloc rx skb frag fail\n"); + return -ENXIO; + } + ring->frag_num = 0; + + if (ring->tail_skb) { + ring->tail_skb->next = new_skb; + ring->tail_skb = new_skb; + } else { + skb_shinfo(skb)->frag_list = new_skb; + ring->tail_skb = new_skb; + } + } + + if (ring->tail_skb) { + head_skb->truesize += hnae3_buf_size(ring); + head_skb->data_len += le16_to_cpu(desc->rx.size); + head_skb->len += le16_to_cpu(desc->rx.size); + skb = ring->tail_skb; + } + + hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); + ring_ptr_move_fw(ring, next_to_clean); + ring->pending_buf++; + } + + return 0; +} + +static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info, + u32 bd_base_info) +{ + u16 gro_count; + u32 l3_type; + + gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M, + HNS3_RXD_GRO_COUNT_S); + /* if there is no HW GRO, do not set gro params */ + if (!gro_count) + return; + + /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count + * to skb_shinfo(skb)->gso_segs + */ + NAPI_GRO_CB(skb)->count = gro_count; + + l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + if (l3_type == HNS3_L3_TYPE_IPV4) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + else if (l3_type == HNS3_L3_TYPE_IPV6) + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; + else + return; + + skb_shinfo(skb)->gso_size = 
hnae3_get_field(bd_base_info, + HNS3_RXD_GRO_SIZE_M, + HNS3_RXD_GRO_SIZE_S); + if (skb_shinfo(skb)->gso_size) + tcp_gro_complete(skb); +} + static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, struct sk_buff *skb) { - struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; struct hnae3_handle *handle = ring->tqp->handle; enum pkt_hash_types rss_type; + struct hns3_desc *desc; + int last_bd; + + /* When driver handle the rss type, ring->next_to_clean indicates the + * first descriptor of next packet, need -1 here. + */ + last_bd = (ring->next_to_clean - 1 + ring->desc_num) % ring->desc_num; + desc = &ring->desc[last_bd]; if (le32_to_cpu(desc->rx.rss_hash)) rss_type = handle->kinfo.rss_type; @@ -2345,18 +2570,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, } static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, - struct sk_buff **out_skb, int *out_bnum) + struct sk_buff **out_skb) { struct net_device *netdev = ring->tqp->handle->kinfo.netdev; + struct sk_buff *skb = ring->skb; struct hns3_desc_cb *desc_cb; struct hns3_desc *desc; - struct sk_buff *skb; - unsigned char *va; u32 bd_base_info; - int pull_len; u32 l234info; int length; - int bnum; + int ret; desc = &ring->desc[ring->next_to_clean]; desc_cb = &ring->desc_cb[ring->next_to_clean]; @@ -2368,9 +2591,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, /* Check valid BD */ if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))) - return -EFAULT; + return -ENXIO; - va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; + if (!skb) + ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset; /* Prefetch first cache line of first page * Idea is to cache few bytes of the header of the packet. Our L1 Cache @@ -2379,62 +2603,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, * lines. In such a case, single fetch would suffice to cache in the * relevant part of the header. 
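The rss-type hunk above steps one descriptor back from next_to_clean because, by the time the hash is read, the ring pointer already addresses the first BD of the next packet. Adding desc_num before the modulo keeps the index in range when next_to_clean is 0. A standalone sketch of that wraparound arithmetic, with a hypothetical helper name:

static inline int example_prev_bd(int next_to_clean, int desc_num)
{
	/* e.g. next_to_clean = 0, desc_num = 512 -> returns 511 */
	return (next_to_clean - 1 + desc_num) % desc_num;
}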
*/ - prefetch(va); + prefetch(ring->va); #if L1_CACHE_BYTES < 128 - prefetch(va + L1_CACHE_BYTES); + prefetch(ring->va + L1_CACHE_BYTES); #endif - skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi, - HNS3_RX_HEAD_SIZE); - if (unlikely(!skb)) { - netdev_err(netdev, "alloc rx skb fail\n"); - - u64_stats_update_begin(&ring->syncp); - ring->stats.sw_err_cnt++; - u64_stats_update_end(&ring->syncp); - - return -ENOMEM; - } - - prefetchw(skb->data); - - bnum = 1; - if (length <= HNS3_RX_HEAD_SIZE) { - memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long))); + if (!skb) { + ret = hns3_alloc_skb(ring, length, ring->va); + *out_skb = skb = ring->skb; - /* We can reuse buffer as-is, just make sure it is local */ - if (likely(page_to_nid(desc_cb->priv) == numa_node_id())) - desc_cb->reuse_flag = 1; - else /* This page cannot be reused so discard it */ - put_page(desc_cb->priv); + if (ret < 0) /* alloc buffer fail */ + return ret; + if (ret > 0) { /* need add frag */ + ret = hns3_add_frag(ring, desc, &skb, false); + if (ret) + return ret; - ring_ptr_move_fw(ring, next_to_clean); + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + memcpy(skb->data, ring->va, + ALIGN(ring->pull_len, sizeof(long))); + } } else { - u64_stats_update_begin(&ring->syncp); - ring->stats.seg_pkt_cnt++; - u64_stats_update_end(&ring->syncp); - - pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE); - - memcpy(__skb_put(skb, pull_len), va, - ALIGN(pull_len, sizeof(long))); - - hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); + ret = hns3_add_frag(ring, desc, &skb, true); + if (ret) + return ret; - while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) { - desc = &ring->desc[ring->next_to_clean]; - desc_cb = &ring->desc_cb[ring->next_to_clean]; - bd_base_info = le32_to_cpu(desc->rx.bd_base_info); - hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb); - ring_ptr_move_fw(ring, next_to_clean); - bnum++; - } + /* As the head data may be changed when GRO enable, copy + * the head data in after other data rx completed + */ + memcpy(skb->data, ring->va, + ALIGN(ring->pull_len, sizeof(long))); } - *out_bnum = bnum; - l234info = le32_to_cpu(desc->rx.l234_info); + bd_base_info = le32_to_cpu(desc->rx.bd_base_info); /* Based on hw strategy, the tag offloaded will be stored at * ot_vlan_tag in two layer tag case, and stored at vlan_tag @@ -2484,7 +2688,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring, ring->tqp_vector->rx_group.total_bytes += skb->len; + /* This is needed in order to enable forwarding support */ + hns3_set_gro_param(skb, l234info, bd_base_info); + hns3_rx_checksum(ring, skb, desc); + *out_skb = skb; hns3_set_rx_skb_rss_type(ring, skb); return 0; @@ -2497,9 +2705,9 @@ int hns3_clean_rx_ring( #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16 struct net_device *netdev = ring->tqp->handle->kinfo.netdev; int recv_pkts, recv_bds, clean_count, err; - int unused_count = hns3_desc_unused(ring); - struct sk_buff *skb = NULL; - int num, bnum = 0; + int unused_count = hns3_desc_unused(ring) - ring->pending_buf; + struct sk_buff *skb = ring->skb; + int num; num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG); rmb(); /* Make sure num taken effect before the other data is touched */ @@ -2513,24 +2721,32 @@ int hns3_clean_rx_ring( hns3_nic_alloc_rx_buffers(ring, clean_count + unused_count); clean_count = 0; - unused_count = hns3_desc_unused(ring); + unused_count = hns3_desc_unused(ring) - + ring->pending_buf; 
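hns3_clean_rx_ring now subtracts ring->pending_buf from the unused-descriptor count: BDs already consumed by a partially assembled multi-BD skb must not be recycled to hardware yet. The sketch below models that accounting; the struct and the hns3_desc_unused-style formula are illustrative reconstructions, not the driver's verbatim code.

struct example_ring {
	int desc_num;		/* total descriptors in the ring */
	int next_to_use;	/* next BD to hand to hardware */
	int next_to_clean;	/* next BD to reclaim from hardware */
	int pending_buf;	/* BDs held by a partially built skb */
};

static int example_desc_unused(const struct example_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

static int example_refill_budget(const struct example_ring *ring)
{
	/* descriptors referenced by the pending skb are not reusable */
	return example_desc_unused(ring) - ring->pending_buf;
}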
} /* Poll one pkt */ - err = hns3_handle_rx_bd(ring, &skb, &bnum); + err = hns3_handle_rx_bd(ring, &skb); if (unlikely(!skb)) /* This fault cannot be repaired */ goto out; - recv_bds += bnum; - clean_count += bnum; - if (unlikely(err)) { /* Do jump the err */ - recv_pkts++; + if (err == -ENXIO) { /* Do not get FE for the packet */ + goto out; + } else if (unlikely(err)) { /* Do jump the err */ + recv_bds += ring->pending_buf; + clean_count += ring->pending_buf; + ring->skb = NULL; + ring->pending_buf = 0; continue; } /* Do update ip stack process */ skb->protocol = eth_type_trans(skb, netdev); rx_fn(ring, skb); + recv_bds += ring->pending_buf; + clean_count += ring->pending_buf; + ring->skb = NULL; + ring->pending_buf = 0; recv_pkts++; } @@ -2644,10 +2860,10 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; bool rx_update, tx_update; - if (tqp_vector->int_adapt_down > 0) { - tqp_vector->int_adapt_down--; + /* update param every 1000ms */ + if (time_before(jiffies, + tqp_vector->last_jiffies + msecs_to_jiffies(1000))) return; - } if (rx_group->coal.gl_adapt_enable) { rx_update = hns3_get_new_int_gl(rx_group); @@ -2664,11 +2880,11 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector) } tqp_vector->last_jiffies = jiffies; - tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START; } static int hns3_nic_common_poll(struct napi_struct *napi, int budget) { + struct hns3_nic_priv *priv = netdev_priv(napi->dev); struct hns3_enet_ring *ring; int rx_pkt_total = 0; @@ -2677,6 +2893,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) bool clean_complete = true; int rx_budget; + if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + napi_complete(napi); + return 0; + } + /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. 
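The interrupt-coalescing hunk above replaces a per-poll countdown with a timestamp comparison, so the GL parameters are recomputed at most once per second regardless of how often NAPI runs. A generic sketch of that jiffies rate-limit idiom, with illustrative names:

#include <linux/jiffies.h>

static unsigned long example_last_jiffies;

static void example_update_coalescing(void)
{
	/* skip the work if less than 1000 ms passed since the last run */
	if (time_before(jiffies,
			example_last_jiffies + msecs_to_jiffies(1000)))
		return;

	/* ... recompute interrupt-coalescing parameters here ... */

	example_last_jiffies = jiffies;
}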
*/ @@ -2701,9 +2922,11 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - napi_complete(napi); - hns3_update_new_int_gl(tqp_vector); - hns3_mask_vector_irq(tqp_vector, 1); + if (napi_complete(napi) && + likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { + hns3_update_new_int_gl(tqp_vector); + hns3_mask_vector_irq(tqp_vector, 1); + } return rx_pkt_total; } @@ -2783,9 +3006,10 @@ err_free_chain: cur_chain = head->next; while (cur_chain) { chain = cur_chain->next; - devm_kfree(&pdev->dev, chain); + devm_kfree(&pdev->dev, cur_chain); cur_chain = chain; } + head->next = NULL; return -ENOMEM; } @@ -2876,7 +3100,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv) ret = hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain); if (ret) - return ret; + goto map_ring_fail; ret = h->ae_algo->ops->map_ring_to_vector(h, tqp_vector->vector_irq, &vector_ring_chain); @@ -2901,6 +3125,8 @@ map_ring_fail: static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) { +#define HNS3_VECTOR_PF_MAX_NUM 64 + struct hnae3_handle *h = priv->ae_handle; struct hns3_enet_tqp_vector *tqp_vector; struct hnae3_vector_info *vector; @@ -2913,6 +3139,8 @@ static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv) /* RSS size, cpu online and vector_num should be the same */ /* Should consider 2p/4p later */ vector_num = min_t(u16, num_online_cpus(), tqp_num); + vector_num = min_t(u16, vector_num, HNS3_VECTOR_PF_MAX_NUM); + vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), GFP_KERNEL); if (!vector) @@ -2970,12 +3198,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv) hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain); - if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) { - (void)irq_set_affinity_hint( - priv->tqp_vector[i].vector_irq, - NULL); - free_irq(priv->tqp_vector[i].vector_irq, - &priv->tqp_vector[i]); + if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) { + irq_set_affinity_notifier(tqp_vector->vector_irq, + NULL); + irq_set_affinity_hint(tqp_vector->vector_irq, NULL); + free_irq(tqp_vector->vector_irq, tqp_vector); + tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED; } priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED; @@ -3319,6 +3547,22 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev) priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx; } +static int hns3_client_start(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_start) + return 0; + + return handle->ae_algo->ops->client_start(handle); +} + +static void hns3_client_stop(struct hnae3_handle *handle) +{ + if (!handle->ae_algo->ops->client_stop) + return; + + handle->ae_algo->ops->client_stop(handle); +} + static int hns3_client_init(struct hnae3_handle *handle) { struct pci_dev *pdev = handle->pdev; @@ -3337,7 +3581,6 @@ static int hns3_client_init(struct hnae3_handle *handle) priv->dev = &pdev->dev; priv->netdev = netdev; priv->ae_handle = handle; - priv->ae_handle->last_reset_time = jiffies; priv->tx_timeout_count = 0; handle->kinfo.netdev = netdev; @@ -3357,11 +3600,6 @@ static int hns3_client_init(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); - if (handle->flags & HNAE3_SUPPORT_VF) - handle->reset_level = HNAE3_VF_RESET; - else - handle->reset_level = HNAE3_FUNC_RESET; - ret = hns3_get_ring_config(priv); if (ret) { ret = -ENOMEM; @@ -3392,10 +3630,20 @@ static int 
hns3_client_init(struct hnae3_handle *handle) goto out_reg_netdev_fail; } + ret = hns3_client_start(handle); + if (ret) { + dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); + goto out_reg_netdev_fail; + } + hns3_dcbnl_setup(handle); - /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */ - netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN); + hns3_dbg_init(handle); + + /* MTU range: (ETH_MIN_MTU(kernel default) - 9702) */ + netdev->max_mtu = HNS3_MAX_MTU; + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; @@ -3418,11 +3666,18 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + hns3_client_stop(handle); + hns3_remove_hw_addr(netdev); if (netdev->reg_state != NETREG_UNINITIALIZED) unregister_netdev(netdev); + if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + goto out_netdev_free; + } + hns3_del_all_fd_rules(netdev, true); hns3_force_clear_all_rx_ring(handle); @@ -3441,8 +3696,11 @@ static void hns3_client_uninit(struct hnae3_handle *handle, bool reset) hns3_put_ring_config(priv); + hns3_dbg_uninit(handle); + priv->ring_data = NULL; +out_netdev_free: free_netdev(netdev); } @@ -3708,8 +3966,22 @@ static void hns3_restore_coal(struct hns3_nic_priv *priv) static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct net_device *ndev = kinfo->netdev; + struct hns3_nic_priv *priv = netdev_priv(ndev); + + if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) + return 0; + + /* it is cumbersome for hardware to pick-and-choose entries for deletion + * from table space. 
Hence, for function reset software intervention is + * required to delete the entries + */ + if (hns3_dev_ongoing_func_reset(ae_dev)) { + hns3_remove_hw_addr(ndev); + hns3_del_all_fd_rules(ndev, false); + } if (!netif_running(ndev)) return 0; @@ -3720,6 +3992,7 @@ static int hns3_reset_notify_down_enet(struct hnae3_handle *handle) static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; + struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); int ret = 0; if (netif_running(kinfo->netdev)) { @@ -3729,9 +4002,10 @@ static int hns3_reset_notify_up_enet(struct hnae3_handle *handle) "hns net up fail, ret=%d!\n", ret); return ret; } - handle->last_reset_time = jiffies; } + clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); + return ret; } @@ -3771,28 +4045,44 @@ static int hns3_reset_notify_init_enet(struct hnae3_handle *handle) /* Carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); + ret = hns3_nic_alloc_vector_data(priv); + if (ret) + return ret; + hns3_restore_coal(priv); ret = hns3_nic_init_vector_data(priv); if (ret) - return ret; + goto err_dealloc_vector; ret = hns3_init_all_ring(priv); - if (ret) { - hns3_nic_uninit_vector_data(priv); - priv->ring_data = NULL; - } + if (ret) + goto err_uninit_vector; + + set_bit(HNS3_NIC_STATE_INITED, &priv->state); + + return ret; + +err_uninit_vector: + hns3_nic_uninit_vector_data(priv); + priv->ring_data = NULL; +err_dealloc_vector: + hns3_nic_dealloc_vector_data(priv); return ret; } static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) { - struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); struct net_device *netdev = handle->kinfo.netdev; struct hns3_nic_priv *priv = netdev_priv(netdev); int ret; + if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { + netdev_warn(netdev, "already uninitialized\n"); + return 0; + } + hns3_force_clear_all_rx_ring(handle); ret = hns3_nic_uninit_vector_data(priv); @@ -3803,18 +4093,15 @@ static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle) hns3_store_coal(priv); + ret = hns3_nic_dealloc_vector_data(priv); + if (ret) + netdev_err(netdev, "dealloc vector error\n"); + ret = hns3_uninit_all_ring(priv); if (ret) netdev_err(netdev, "uninit ring error\n"); - /* it is cumbersome for hardware to pick-and-choose entries for deletion - * from table space. 
Hence, for function reset software intervention is - * required to delete the entries - */ - if (hns3_dev_ongoing_func_reset(ae_dev)) { - hns3_remove_hw_addr(netdev); - hns3_del_all_fd_rules(netdev, false); - } + clear_bit(HNS3_NIC_STATE_INITED, &priv->state); return ret; } @@ -3980,15 +4267,23 @@ static int __init hns3_init_module(void) INIT_LIST_HEAD(&client.node); + hns3_dbg_register_debugfs(hns3_driver_name); + ret = hnae3_register_client(&client); if (ret) - return ret; + goto err_reg_client; ret = pci_register_driver(&hns3_driver); if (ret) - hnae3_unregister_client(&client); + goto err_reg_driver; return ret; + +err_reg_driver: + hnae3_unregister_client(&client); +err_reg_client: + hns3_dbg_unregister_debugfs(); + return ret; } module_init(hns3_init_module); @@ -4000,6 +4295,7 @@ static void __exit hns3_exit_module(void) { pci_unregister_driver(&hns3_driver); hnae3_unregister_client(&client); + hns3_dbg_unregister_debugfs(); } module_exit(hns3_exit_module); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index d3636d088aa3..e55995e93bb0 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -15,7 +15,7 @@ extern const char hns3_driver_version[]; enum hns3_nic_state { HNS3_NIC_STATE_TESTING, HNS3_NIC_STATE_RESETTING, - HNS3_NIC_STATE_REINITING, + HNS3_NIC_STATE_INITED, HNS3_NIC_STATE_DOWN, HNS3_NIC_STATE_DISABLED, HNS3_NIC_STATE_REMOVING, @@ -47,7 +47,7 @@ enum hns3_nic_state { #define HNS3_RING_PREFETCH_EN_REG 0x0007C #define HNS3_RING_CFG_VF_NUM_REG 0x00080 #define HNS3_RING_ASID_REG 0x0008C -#define HNS3_RING_RX_VM_REG 0x00090 +#define HNS3_RING_EN_REG 0x00090 #define HNS3_RING_T0_BE_RST 0x00094 #define HNS3_RING_COULD_BE_RST 0x00098 #define HNS3_RING_WRR_WEIGHT_REG 0x0009c @@ -76,7 +76,10 @@ enum hns3_nic_state { #define HNS3_RING_MAX_PENDING 32768 #define HNS3_RING_MIN_PENDING 8 #define HNS3_RING_BD_MULTIPLE 8 -#define HNS3_MAX_MTU 9728 +/* max frame size of mac */ +#define HNS3_MAC_MAX_FRAME 9728 +#define HNS3_MAX_MTU \ + (HNS3_MAC_MAX_FRAME - (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN)) #define HNS3_BD_SIZE_512_TYPE 0 #define HNS3_BD_SIZE_1024_TYPE 1 @@ -109,6 +112,10 @@ enum hns3_nic_state { #define HNS3_RXD_DOI_B 21 #define HNS3_RXD_OL3E_B 22 #define HNS3_RXD_OL4E_B 23 +#define HNS3_RXD_GRO_COUNT_S 24 +#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S) +#define HNS3_RXD_GRO_FIXID_B 30 +#define HNS3_RXD_GRO_ECN_B 31 #define HNS3_RXD_ODMAC_S 0 #define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S) @@ -135,9 +142,8 @@ enum hns3_nic_state { #define HNS3_RXD_TSIND_S 12 #define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S) #define HNS3_RXD_LKBK_B 15 -#define HNS3_RXD_HDL_S 16 -#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S) -#define HNS3_RXD_HSIND_B 31 +#define HNS3_RXD_GRO_SIZE_S 16 +#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S) #define HNS3_TXD_L3T_S 0 #define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S) @@ -194,6 +200,8 @@ enum hns3_nic_state { #define HNS3_VECTOR_RL_OFFSET 0x900 #define HNS3_VECTOR_RL_EN_B 6 +#define HNS3_RING_EN_B 0 + enum hns3_pkt_l3t_type { HNS3_L3T_NONE, HNS3_L3T_IPV6, @@ -399,11 +407,19 @@ struct hns3_enet_ring { */ int next_to_clean; + int pull_len; /* head length for current packet */ + u32 frag_num; + unsigned char *va; /* first buffer address for current packet */ + u32 flag; /* ring attribute */ int irq_init_flag; int numa_node; cpumask_t affinity_mask; + + int pending_buf; + struct sk_buff *skb; + struct sk_buff 
*tail_skb; }; struct hns_queue; @@ -460,8 +476,6 @@ enum hns3_link_mode_bits { #define HNS3_INT_RL_MAX 0x00EC #define HNS3_INT_RL_ENABLE_MASK 0x40 -#define HNS3_INT_ADAPT_DOWN_START 100 - struct hns3_enet_coalesce { u16 int_gl; u8 gl_adapt_enable; @@ -496,8 +510,6 @@ struct hns3_enet_tqp_vector { char name[HNAE3_INT_NAME_LEN]; - /* when 0 should adjust interrupt coalesce parameter */ - u8 int_adapt_down; unsigned long last_jiffies; } ____cacheline_internodealigned_in_smp; @@ -577,6 +589,11 @@ static inline int is_ring_empty(struct hns3_enet_ring *ring) return ring->next_to_use == ring->next_to_clean; } +static inline u32 hns3_read_reg(void __iomem *base, u32 reg) +{ + return readl(base + reg); +} + static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) { u8 __iomem *reg_addr = READ_ONCE(base); @@ -586,7 +603,21 @@ static inline void hns3_write_reg(void __iomem *base, u32 reg, u32 value) static inline bool hns3_dev_ongoing_func_reset(struct hnae3_ae_dev *ae_dev) { - return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET)); + return (ae_dev && (ae_dev->reset_type == HNAE3_FUNC_RESET || + ae_dev->reset_type == HNAE3_FLR_RESET || + ae_dev->reset_type == HNAE3_VF_FUNC_RESET || + ae_dev->reset_type == HNAE3_VF_FULL_RESET || + ae_dev->reset_type == HNAE3_VF_PF_FUNC_RESET)); +} + +#define hns3_read_dev(a, reg) \ + hns3_read_reg((a)->io_base, (reg)) + +static inline bool hns3_nic_resetting(struct net_device *netdev) +{ + struct hns3_nic_priv *priv = netdev_priv(netdev); + + return test_bit(HNS3_NIC_STATE_RESETTING, &priv->state); } #define hns3_write_dev(a, reg, value) \ @@ -648,4 +679,8 @@ void hns3_dcbnl_setup(struct hnae3_handle *handle); static inline void hns3_dcbnl_setup(struct hnae3_handle *handle) {} #endif +void hns3_dbg_init(struct hnae3_handle *handle); +void hns3_dbg_uninit(struct hnae3_handle *handle); +void hns3_dbg_register_debugfs(const char *debugfs_dir_name); +void hns3_dbg_unregister_debugfs(void); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c index a4762c2b8ba1..e678b6939da3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c @@ -291,6 +291,11 @@ static void hns3_self_test(struct net_device *ndev, int test_index = 0; u32 i; + if (hns3_nic_resetting(ndev)) { + netdev_err(ndev, "dev resetting!"); + return; + } + /* Only do offline selftest, or pass by default */ if (eth_test->flags != ETH_TEST_FL_OFFLINE) return; @@ -530,6 +535,11 @@ static void hns3_get_ringparam(struct net_device *netdev, struct hnae3_handle *h = priv->ae_handle; int queue_num = h->kinfo.num_tqps; + if (hns3_nic_resetting(netdev)) { + netdev_err(netdev, "dev resetting!"); + return; + } + param->tx_max_pending = HNS3_RING_MAX_PENDING; param->rx_max_pending = HNS3_RING_MAX_PENDING; @@ -760,6 +770,9 @@ static int hns3_set_ringparam(struct net_device *ndev, u32 old_desc_num, new_desc_num; int ret; + if (hns3_nic_resetting(ndev)) + return -EBUSY; + if (param->rx_mini_pending || param->rx_jumbo_pending) return -EINVAL; @@ -808,7 +821,7 @@ static int hns3_set_ringparam(struct net_device *ndev, } if (if_running) - ret = dev_open(ndev); + ret = dev_open(ndev, NULL); return ret; } @@ -872,6 +885,9 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue, struct hnae3_handle *h = priv->ae_handle; u16 queue_num = h->kinfo.num_tqps; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + if (queue >= queue_num) { netdev_err(netdev, "Invalid 
queue value %d! Queue max id=%d\n", @@ -1033,6 +1049,9 @@ static int hns3_set_coalesce(struct net_device *netdev, int ret; int i; + if (hns3_nic_resetting(netdev)) + return -EBUSY; + ret = hns3_check_coalesce_para(netdev, cmd); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile index 580e81743681..fffe8c1c45d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/Makefile @@ -6,6 +6,6 @@ ccflags-y := -Idrivers/net/ethernet/hisilicon/hns3 obj-$(CONFIG_HNS3_HCLGE) += hclge.o -hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o +hclge-objs = hclge_main.o hclge_cmd.o hclge_mdio.o hclge_tm.o hclge_mbx.o hclge_err.o hclge_debugfs.o hclge-$(CONFIG_HNS3_DCB) += hclge_dcb.o diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c index 690f62ed87dc..8af0cef5609b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.c @@ -350,11 +350,20 @@ int hclge_cmd_init(struct hclge_dev *hdev) hdev->hw.cmq.crq.next_to_use = 0; hclge_cmd_init_regs(&hdev->hw); - clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); spin_unlock_bh(&hdev->hw.cmq.crq.lock); spin_unlock_bh(&hdev->hw.cmq.csq.lock); + clear_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if ((hclge_is_reset_pending(hdev))) { + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + return -EBUSY; + } + ret = hclge_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h index 872cd4bdd70d..f23042b24c09 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_cmd.h @@ -86,11 +86,24 @@ enum hclge_opcode_type { HCLGE_OPC_QUERY_REG_NUM = 0x0040, HCLGE_OPC_QUERY_32_BIT_REG = 0x0041, HCLGE_OPC_QUERY_64_BIT_REG = 0x0042, + HCLGE_OPC_DFX_BD_NUM = 0x0043, + HCLGE_OPC_DFX_BIOS_COMMON_REG = 0x0044, + HCLGE_OPC_DFX_SSU_REG_0 = 0x0045, + HCLGE_OPC_DFX_SSU_REG_1 = 0x0046, + HCLGE_OPC_DFX_IGU_EGU_REG = 0x0047, + HCLGE_OPC_DFX_RPU_REG_0 = 0x0048, + HCLGE_OPC_DFX_RPU_REG_1 = 0x0049, + HCLGE_OPC_DFX_NCSI_REG = 0x004A, + HCLGE_OPC_DFX_RTC_REG = 0x004B, + HCLGE_OPC_DFX_PPP_REG = 0x004C, + HCLGE_OPC_DFX_RCB_REG = 0x004D, + HCLGE_OPC_DFX_TQP_REG = 0x004E, + HCLGE_OPC_DFX_SSU_REG_2 = 0x004F, + HCLGE_OPC_DFX_QUERY_CHIP_CAP = 0x0050, /* MAC command */ HCLGE_OPC_CONFIG_MAC_MODE = 0x0301, HCLGE_OPC_CONFIG_AN_MODE = 0x0304, - HCLGE_OPC_QUERY_AN_RESULT = 0x0306, HCLGE_OPC_QUERY_LINK_STATUS = 0x0307, HCLGE_OPC_CONFIG_MAX_FRM_SIZE = 0x0308, HCLGE_OPC_CONFIG_SPEED_DUP = 0x0309, @@ -126,6 +139,16 @@ enum hclge_opcode_type { HCLGE_OPC_TM_PRI_SCH_MODE_CFG = 0x0813, HCLGE_OPC_TM_QS_SCH_MODE_CFG = 0x0814, HCLGE_OPC_TM_BP_TO_QSET_MAPPING = 0x0815, + HCLGE_OPC_ETS_TC_WEIGHT = 0x0843, + HCLGE_OPC_QSET_DFX_STS = 0x0844, + HCLGE_OPC_PRI_DFX_STS = 0x0845, + HCLGE_OPC_PG_DFX_STS = 0x0846, + HCLGE_OPC_PORT_DFX_STS = 0x0847, + HCLGE_OPC_SCH_NQ_CNT = 0x0848, + HCLGE_OPC_SCH_RQ_CNT = 0x0849, + HCLGE_OPC_TM_INTERNAL_STS = 0x0850, + HCLGE_OPC_TM_INTERNAL_CNT = 0x0851, + HCLGE_OPC_TM_INTERNAL_STS_1 = 0x0852, /* Packet buffer allocate commands */ 
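The hclge_cmd_init() hunk above clears HCLGE_STATE_CMD_DISABLE only after both command-queue locks are released, then re-checks whether a new, higher-level reset became pending while the lower-level reset was still being serviced; if so, it re-disables the command queue and returns -EBUSY. Below is a minimal, self-contained userspace sketch of that clear-then-recheck pattern, with C11 atomics standing in for the kernel's set_bit()/clear_bit() and hypothetical names (cmdq_reinit, cmd_disabled, reset_pending) that are not the driver's actual symbols:

#include <stdatomic.h>
#include <stdbool.h>
#include <errno.h>

static atomic_bool cmd_disabled = true;   /* like HCLGE_STATE_CMD_DISABLE */
static atomic_bool reset_pending = false; /* like hclge_is_reset_pending() */

static int cmdq_reinit(void)
{
	/* re-enable the command queue; in the driver this happens only
	 * after the CSQ/CRQ locks have been dropped */
	atomic_store(&cmd_disabled, false);

	/* a higher-level reset may have been requested while the
	 * lower-level reset was still being processed: back off */
	if (atomic_load(&reset_pending)) {
		atomic_store(&cmd_disabled, true);
		return -EBUSY;
	}
	return 0;
}

int main(void)
{
	return cmdq_reinit();
}

The point of ordering the re-check after the unlock is that a reset interrupt arriving during re-initialisation is still observed, rather than racing with the DISABLE bit while the locks are held.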
HCLGE_OPC_TX_BUFF_ALLOC = 0x0901, @@ -142,6 +165,7 @@ enum hclge_opcode_type { HCLGE_OPC_CFG_TX_QUEUE = 0x0B01, HCLGE_OPC_QUERY_TX_POINTER = 0x0B02, HCLGE_OPC_QUERY_TX_STATUS = 0x0B03, + HCLGE_OPC_TQP_TX_QUEUE_TC = 0x0B04, HCLGE_OPC_CFG_RX_QUEUE = 0x0B11, HCLGE_OPC_QUERY_RX_POINTER = 0x0B12, HCLGE_OPC_QUERY_RX_STATUS = 0x0B13, @@ -152,6 +176,7 @@ enum hclge_opcode_type { /* TSO command */ HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01, + HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10, /* RSS commands */ HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01, @@ -210,27 +235,34 @@ enum hclge_opcode_type { /* Led command */ HCLGE_OPC_LED_STATUS_CFG = 0xB000, + /* SFP command */ + HCLGE_OPC_SFP_GET_SPEED = 0x7104, + /* Error INT commands */ + HCLGE_MAC_COMMON_INT_EN = 0x030E, HCLGE_TM_SCH_ECC_INT_EN = 0x0829, - HCLGE_TM_SCH_ECC_ERR_RINT_CMD = 0x082d, - HCLGE_TM_SCH_ECC_ERR_RINT_CE = 0x082f, - HCLGE_TM_SCH_ECC_ERR_RINT_NFE = 0x0830, - HCLGE_TM_SCH_ECC_ERR_RINT_FE = 0x0831, - HCLGE_TM_SCH_MBIT_ECC_INFO_CMD = 0x0833, + HCLGE_SSU_ECC_INT_CMD = 0x0989, + HCLGE_SSU_COMMON_INT_CMD = 0x098C, + HCLGE_PPU_MPF_ECC_INT_CMD = 0x0B40, + HCLGE_PPU_MPF_OTHER_INT_CMD = 0x0B41, + HCLGE_PPU_PF_OTHER_INT_CMD = 0x0B42, HCLGE_COMMON_ECC_INT_CFG = 0x1505, - HCLGE_IGU_EGU_TNL_INT_QUERY = 0x1802, + HCLGE_QUERY_RAS_INT_STS_BD_NUM = 0x1510, + HCLGE_QUERY_CLEAR_MPF_RAS_INT = 0x1511, + HCLGE_QUERY_CLEAR_PF_RAS_INT = 0x1512, + HCLGE_QUERY_MSIX_INT_STS_BD_NUM = 0x1513, + HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT = 0x1514, + HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT = 0x1515, + HCLGE_CONFIG_ROCEE_RAS_INT_EN = 0x1580, + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT = 0x1581, + HCLGE_ROCEE_PF_RAS_INT_CMD = 0x1584, HCLGE_IGU_EGU_TNL_INT_EN = 0x1803, - HCLGE_IGU_EGU_TNL_INT_CLR = 0x1804, - HCLGE_IGU_COMMON_INT_QUERY = 0x1805, HCLGE_IGU_COMMON_INT_EN = 0x1806, - HCLGE_IGU_COMMON_INT_CLR = 0x1807, HCLGE_TM_QCN_MEM_INT_CFG = 0x1A14, - HCLGE_TM_QCN_MEM_INT_INFO_CMD = 0x1A17, HCLGE_PPP_CMD0_INT_CMD = 0x2100, HCLGE_PPP_CMD1_INT_CMD = 0x2101, - HCLGE_NCSI_INT_QUERY = 0x2400, + HCLGE_MAC_ETHERTYPE_IDX_RD = 0x2105, HCLGE_NCSI_INT_EN = 0x2401, - HCLGE_NCSI_INT_CLR = 0x2402, }; #define HCLGE_TQP_REG_OFFSET 0x80000 @@ -388,7 +420,9 @@ struct hclge_pf_res_cmd { #define HCLGE_PF_VEC_NUM_M GENMASK(7, 0) __le16 pf_intr_vector_number; __le16 pf_own_fun_number; - __le32 rsv[3]; + __le16 tx_buf_size; + __le16 dv_buf_size; + __le32 rsv[2]; }; #define HCLGE_CFG_OFFSET_S 0 @@ -542,20 +576,6 @@ struct hclge_config_mac_speed_dup_cmd { u8 rsv[22]; }; -#define HCLGE_QUERY_SPEED_S 3 -#define HCLGE_QUERY_AN_B 0 -#define HCLGE_QUERY_DUPLEX_B 2 - -#define HCLGE_QUERY_SPEED_M GENMASK(4, 0) -#define HCLGE_QUERY_AN_M BIT(HCLGE_QUERY_AN_B) -#define HCLGE_QUERY_DUPLEX_M BIT(HCLGE_QUERY_DUPLEX_B) - -struct hclge_query_an_speed_dup_cmd { - u8 an_syn_dup_speed; - u8 pause; - u8 rsv[23]; -}; - #define HCLGE_RING_ID_MASK GENMASK(9, 0) #define HCLGE_TQP_ENABLE_B 0 @@ -572,6 +592,11 @@ struct hclge_config_auto_neg_cmd { u8 rsv[20]; }; +struct hclge_sfp_speed_cmd { + __le32 sfp_speed; + u32 rsv[5]; +}; + #define HCLGE_MAC_UPLINK_PORT 0x100 struct hclge_config_max_frm_size_cmd { @@ -746,6 +771,24 @@ struct hclge_cfg_tx_queue_pointer_cmd { u8 rsv[14]; }; +#pragma pack(1) +struct hclge_mac_ethertype_idx_rd_cmd { + u8 flags; + u8 resp_code; + __le16 vlan_tag; + u8 mac_add[6]; + __le16 index; + __le16 ethter_type; + __le16 egress_port; + __le16 egress_queue; + __le16 rev0; + u8 i_port_bitmap; + u8 i_port_direction; + u8 rev1[2]; +}; + +#pragma pack() + #define HCLGE_TSO_MSS_MIN_S 0 #define HCLGE_TSO_MSS_MIN_M GENMASK(13, 0) @@ -758,6 
+801,12 @@ struct hclge_cfg_tso_status_cmd { u8 rsv[20]; }; +#define HCLGE_GRO_EN_B 0 +struct hclge_cfg_gro_status_cmd { + __le16 gro_en; + u8 rsv[22]; +}; + #define HCLGE_TSO_MSS_MIN 256 #define HCLGE_TSO_MSS_MAX 9668 @@ -792,6 +841,7 @@ struct hclge_serdes_lb_cmd { #define HCLGE_TOTAL_PKT_BUF 0x108000 /* 1.03125M bytes */ #define HCLGE_DEFAULT_DV 0xA000 /* 40k byte */ #define HCLGE_DEFAULT_NON_DCB_DV 0x7800 /* 30K byte */ +#define HCLGE_NON_DCB_ADDITIONAL_BUF 0x200 /* 512 byte */ #define HCLGE_TYPE_CRQ 0 #define HCLGE_TYPE_CSQ 1 diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index e72f724123d7..f6323b2501dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -35,7 +35,9 @@ static int hclge_ieee_ets_to_tm_info(struct hclge_dev *hdev, } } - return hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + hclge_tm_prio_tc_info_update(hdev, ets->prio_tc); + + return 0; } static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, @@ -70,25 +72,61 @@ static int hclge_ieee_getets(struct hnae3_handle *h, struct ieee_ets *ets) return 0; } +static int hclge_dcb_common_validate(struct hclge_dev *hdev, u8 num_tc, + u8 *prio_tc) +{ + int i; + + if (num_tc > hdev->tc_max) { + dev_err(&hdev->pdev->dev, + "tc num checking failed, %u > tc_max(%u)\n", + num_tc, hdev->tc_max); + return -EINVAL; + } + + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { + if (prio_tc[i] >= num_tc) { + dev_err(&hdev->pdev->dev, + "prio_tc[%u] checking failed, %u >= num_tc(%u)\n", + i, prio_tc[i], num_tc); + return -EINVAL; + } + } + + for (i = 0; i < hdev->num_alloc_vport; i++) { + if (num_tc > hdev->vport[i].alloc_tqps) { + dev_err(&hdev->pdev->dev, + "allocated tqp(%u) checking failed, %u > tqp(%u)\n", + i, num_tc, hdev->vport[i].alloc_tqps); + return -EINVAL; + } + } + + return 0; +} + static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, u8 *tc, bool *changed) { bool has_ets_tc = false; u32 total_ets_bw = 0; u8 max_tc = 0; + int ret; u8 i; - for (i = 0; i < HNAE3_MAX_TC; i++) { - if (ets->prio_tc[i] >= hdev->tc_max || - i >= hdev->tc_max) - return -EINVAL; - + for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { if (ets->prio_tc[i] != hdev->tm_info.prio_tc[i]) *changed = true; if (ets->prio_tc[i] > max_tc) max_tc = ets->prio_tc[i]; + } + ret = hclge_dcb_common_validate(hdev, max_tc + 1, ets->prio_tc); + if (ret) + return ret; + + for (i = 0; i < HNAE3_MAX_TC; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: if (hdev->tm_info.tc_info[i].tc_sch_mode != @@ -184,9 +222,7 @@ static int hclge_ieee_setets(struct hnae3_handle *h, struct ieee_ets *ets) if (ret) return ret; - ret = hclge_tm_schd_info_update(hdev, num_tc); - if (ret) - return ret; + hclge_tm_schd_info_update(hdev, num_tc); ret = hclge_ieee_ets_to_tm_info(hdev, ets); if (ret) @@ -305,20 +341,12 @@ static int hclge_setup_tc(struct hnae3_handle *h, u8 tc, u8 *prio_tc) if (hdev->flag & HCLGE_FLAG_DCB_ENABLE) return -EINVAL; - if (tc > hdev->tc_max) { - dev_err(&hdev->pdev->dev, - "setup tc failed, tc(%u) > tc_max(%u)\n", - tc, hdev->tc_max); - return -EINVAL; - } - - ret = hclge_tm_schd_info_update(hdev, tc); + ret = hclge_dcb_common_validate(hdev, tc, prio_tc); if (ret) - return ret; + return -EINVAL; - ret = hclge_tm_prio_tc_info_update(hdev, prio_tc); - if (ret) - return ret; + hclge_tm_schd_info_update(hdev, tc); + hclge_tm_prio_tc_info_update(hdev, prio_tc); ret = hclge_tm_init_hw(hdev); if 
(ret) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c new file mode 100644 index 000000000000..26d80504c730 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -0,0 +1,933 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018-2019 Hisilicon Limited. */ + +#include <linux/device.h> + +#include "hclge_debugfs.h" +#include "hclge_cmd.h" +#include "hclge_main.h" +#include "hclge_tm.h" +#include "hnae3.h" + +static int hclge_dbg_get_dfx_bd_num(struct hclge_dev *hdev, int offset) +{ + struct hclge_desc desc[4]; + int ret; + + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_DFX_BD_NUM, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_DFX_BD_NUM, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_DFX_BD_NUM, true); + desc[2].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[3], HCLGE_OPC_DFX_BD_NUM, true); + + ret = hclge_cmd_send(&hdev->hw, desc, 4); + if (ret != HCLGE_CMD_EXEC_SUCCESS) { + dev_err(&hdev->pdev->dev, + "get dfx bdnum fail, status is %d.\n", ret); + return ret; + } + + return (int)desc[offset / 6].data[offset % 6]; +} + +static int hclge_dbg_cmd_send(struct hclge_dev *hdev, + struct hclge_desc *desc_src, + int index, int bd_num, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc = desc_src; + int ret, i; + + hclge_cmd_setup_basic_desc(desc, cmd, true); + desc->data[0] = cpu_to_le32(index); + + for (i = 1; i < bd_num; i++) { + desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + desc++; + hclge_cmd_setup_basic_desc(desc, cmd, true); + } + + ret = hclge_cmd_send(&hdev->hw, desc_src, bd_num); + if (ret) { + dev_err(&hdev->pdev->dev, + "read reg cmd send fail, status is %d.\n", ret); + return ret; + } + + return ret; +} + +static void hclge_dbg_dump_reg_common(struct hclge_dev *hdev, + struct hclge_dbg_dfx_message *dfx_message, + char *cmd_buf, int msg_num, int offset, + enum hclge_opcode_type cmd) +{ + struct hclge_desc *desc_src; + struct hclge_desc *desc; + int bd_num, buf_len; + int ret, i; + int index; + int max; + + ret = kstrtouint(cmd_buf, 10, &index); + index = (ret != 0) ? 0 : index; + + bd_num = hclge_dbg_get_dfx_bd_num(hdev, offset); + if (bd_num <= 0) + return; + + buf_len = sizeof(struct hclge_desc) * bd_num; + desc_src = kzalloc(buf_len, GFP_KERNEL); + if (!desc_src) { + dev_err(&hdev->pdev->dev, "call kzalloc failed\n"); + return; + } + + desc = desc_src; + ret = hclge_dbg_cmd_send(hdev, desc, index, bd_num, cmd); + if (ret != HCLGE_CMD_EXEC_SUCCESS) { + kfree(desc_src); + return; + } + + max = (bd_num * 6) <= msg_num ? (bd_num * 6) : msg_num; + + desc = desc_src; + for (i = 0; i < max; i++) { + (((i / 6) > 0) && ((i % 6) == 0)) ? 
desc++ : desc; + if (dfx_message->flag) + dev_info(&hdev->pdev->dev, "%s: 0x%x\n", + dfx_message->message, desc->data[i % 6]); + + dfx_message++; + } + + kfree(desc_src); +} + +static void hclge_dbg_dump_dcb(struct hclge_dev *hdev, char *cmd_buf) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_dbg_bitmap_cmd *bitmap; + int rq_id, pri_id, qset_id; + int port_id, nq_id, pg_id; + struct hclge_desc desc[2]; + + int cnt, ret; + + cnt = sscanf(cmd_buf, "%i %i %i %i %i %i", + &port_id, &pri_id, &pg_id, &rq_id, &nq_id, &qset_id); + if (cnt != 6) { + dev_err(&hdev->pdev->dev, + "dump dcb: bad command parameter, cnt=%d\n", cnt); + return; + } + + ret = hclge_dbg_cmd_send(hdev, desc, qset_id, 1, + HCLGE_OPC_QSET_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "roce_qset_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "nic_qs_mask: 0x%x\n", bitmap->bit1); + dev_info(dev, "qs_shaping_pass: 0x%x\n", bitmap->bit2); + dev_info(dev, "qs_bp_sts: 0x%x\n", bitmap->bit3); + + ret = hclge_dbg_cmd_send(hdev, desc, pri_id, 1, HCLGE_OPC_PRI_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "pri_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "pri_cshaping_pass: 0x%x\n", bitmap->bit1); + dev_info(dev, "pri_pshaping_pass: 0x%x\n", bitmap->bit2); + + ret = hclge_dbg_cmd_send(hdev, desc, pg_id, 1, HCLGE_OPC_PG_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "pg_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "pg_cshaping_pass: 0x%x\n", bitmap->bit1); + dev_info(dev, "pg_pshaping_pass: 0x%x\n", bitmap->bit2); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_PORT_DFX_STS); + if (ret) + return; + + bitmap = (struct hclge_dbg_bitmap_cmd *)&desc[0].data[1]; + dev_info(dev, "port_mask: 0x%x\n", bitmap->bit0); + dev_info(dev, "port_shaping_pass: 0x%x\n", bitmap->bit1); + + ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_NQ_CNT); + if (ret) + return; + + dev_info(dev, "sch_nq_cnt: 0x%x\n", desc[0].data[1]); + + ret = hclge_dbg_cmd_send(hdev, desc, nq_id, 1, HCLGE_OPC_SCH_RQ_CNT); + if (ret) + return; + + dev_info(dev, "sch_rq_cnt: 0x%x\n", desc[0].data[1]); + + ret = hclge_dbg_cmd_send(hdev, desc, 0, 2, HCLGE_OPC_TM_INTERNAL_STS); + if (ret) + return; + + dev_info(dev, "pri_bp: 0x%x\n", desc[0].data[1]); + dev_info(dev, "fifo_dfx_info: 0x%x\n", desc[0].data[2]); + dev_info(dev, "sch_roce_fifo_afull_gap: 0x%x\n", desc[0].data[3]); + dev_info(dev, "tx_private_waterline: 0x%x\n", desc[0].data[4]); + dev_info(dev, "tm_bypass_en: 0x%x\n", desc[0].data[5]); + dev_info(dev, "SSU_TM_BYPASS_EN: 0x%x\n", desc[1].data[0]); + dev_info(dev, "SSU_RESERVE_CFG: 0x%x\n", desc[1].data[1]); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_CNT); + if (ret) + return; + + dev_info(dev, "SCH_NIC_NUM: 0x%x\n", desc[0].data[1]); + dev_info(dev, "SCH_ROCE_NUM: 0x%x\n", desc[0].data[2]); + + ret = hclge_dbg_cmd_send(hdev, desc, port_id, 1, + HCLGE_OPC_TM_INTERNAL_STS_1); + if (ret) + return; + + dev_info(dev, "TC_MAP_SEL: 0x%x\n", desc[0].data[1]); + dev_info(dev, "IGU_PFC_PRI_EN: 0x%x\n", desc[0].data[2]); + dev_info(dev, "MAC_PFC_PRI_EN: 0x%x\n", desc[0].data[3]); + dev_info(dev, "IGU_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[4]); + dev_info(dev, "IGU_TX_PRI_MAP_TC_CFG: 0x%x\n", desc[0].data[5]); +} + +static void hclge_dbg_dump_reg_cmd(struct hclge_dev *hdev, char *cmd_buf) +{ + int msg_num; + + if 
(strncmp(&cmd_buf[9], "bios common", 11) == 0) { + msg_num = sizeof(hclge_dbg_bios_common_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_bios_common_reg, + &cmd_buf[21], msg_num, + HCLGE_DBG_DFX_BIOS_OFFSET, + HCLGE_OPC_DFX_BIOS_COMMON_REG); + } else if (strncmp(&cmd_buf[9], "ssu", 3) == 0) { + msg_num = sizeof(hclge_dbg_ssu_reg_0) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_0, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_SSU_0_OFFSET, + HCLGE_OPC_DFX_SSU_REG_0); + + msg_num = sizeof(hclge_dbg_ssu_reg_1) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_1, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_SSU_1_OFFSET, + HCLGE_OPC_DFX_SSU_REG_1); + + msg_num = sizeof(hclge_dbg_ssu_reg_2) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ssu_reg_2, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_SSU_2_OFFSET, + HCLGE_OPC_DFX_SSU_REG_2); + } else if (strncmp(&cmd_buf[9], "igu egu", 7) == 0) { + msg_num = sizeof(hclge_dbg_igu_egu_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_igu_egu_reg, + &cmd_buf[17], msg_num, + HCLGE_DBG_DFX_IGU_OFFSET, + HCLGE_OPC_DFX_IGU_EGU_REG); + } else if (strncmp(&cmd_buf[9], "rpu", 3) == 0) { + msg_num = sizeof(hclge_dbg_rpu_reg_0) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_0, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RPU_0_OFFSET, + HCLGE_OPC_DFX_RPU_REG_0); + + msg_num = sizeof(hclge_dbg_rpu_reg_1) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rpu_reg_1, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RPU_1_OFFSET, + HCLGE_OPC_DFX_RPU_REG_1); + } else if (strncmp(&cmd_buf[9], "ncsi", 4) == 0) { + msg_num = sizeof(hclge_dbg_ncsi_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ncsi_reg, + &cmd_buf[14], msg_num, + HCLGE_DBG_DFX_NCSI_OFFSET, + HCLGE_OPC_DFX_NCSI_REG); + } else if (strncmp(&cmd_buf[9], "rtc", 3) == 0) { + msg_num = sizeof(hclge_dbg_rtc_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rtc_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RTC_OFFSET, + HCLGE_OPC_DFX_RTC_REG); + } else if (strncmp(&cmd_buf[9], "ppp", 3) == 0) { + msg_num = sizeof(hclge_dbg_ppp_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_ppp_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_PPP_OFFSET, + HCLGE_OPC_DFX_PPP_REG); + } else if (strncmp(&cmd_buf[9], "rcb", 3) == 0) { + msg_num = sizeof(hclge_dbg_rcb_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_rcb_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_RCB_OFFSET, + HCLGE_OPC_DFX_RCB_REG); + } else if (strncmp(&cmd_buf[9], "tqp", 3) == 0) { + msg_num = sizeof(hclge_dbg_tqp_reg) / + sizeof(struct hclge_dbg_dfx_message); + hclge_dbg_dump_reg_common(hdev, hclge_dbg_tqp_reg, + &cmd_buf[13], msg_num, + HCLGE_DBG_DFX_TQP_OFFSET, + HCLGE_OPC_DFX_TQP_REG); + } else if (strncmp(&cmd_buf[9], "dcb", 3) == 0) { + hclge_dbg_dump_dcb(hdev, &cmd_buf[13]); + } else { + dev_info(&hdev->pdev->dev, "unknown command\n"); + return; + } +} + +static void hclge_title_idx_print(struct hclge_dev *hdev, bool flag, int index, + char *title_buf, char *true_buf, + char *false_buf) +{ + if (flag) + dev_info(&hdev->pdev->dev, "%s(%d): %s\n", title_buf, index, + true_buf); + else + dev_info(&hdev->pdev->dev, "%s(%d): %s\n", 
title_buf, index, + false_buf); +} + +static void hclge_dbg_dump_tc(struct hclge_dev *hdev) +{ + struct hclge_ets_tc_weight_cmd *ets_weight; + struct hclge_desc desc; + int i, ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_ETS_TC_WEIGHT, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "dump tc fail, status is %d.\n", ret); + return; + } + + ets_weight = (struct hclge_ets_tc_weight_cmd *)desc.data; + + dev_info(&hdev->pdev->dev, "dump tc\n"); + dev_info(&hdev->pdev->dev, "weight_offset: %u\n", + ets_weight->weight_offset); + + for (i = 0; i < HNAE3_MAX_TC; i++) + hclge_title_idx_print(hdev, ets_weight->tc_weight[i], i, + "tc", "no sp mode", "sp mode"); +} + +static void hclge_dbg_dump_tm_pg(struct hclge_dev *hdev) +{ + struct hclge_port_shapping_cmd *port_shap_cfg_cmd; + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_pg_shapping_cmd *pg_shap_cfg_cmd; + enum hclge_opcode_type cmd; + struct hclge_desc desc; + int ret; + + cmd = HCLGE_OPC_TM_PG_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PG_C pg_id: %u\n", pg_shap_cfg_cmd->pg_id); + dev_info(&hdev->pdev->dev, "PG_C pg_shapping: 0x%x\n", + pg_shap_cfg_cmd->pg_shapping_para); + + cmd = HCLGE_OPC_TM_PG_P_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + pg_shap_cfg_cmd = (struct hclge_pg_shapping_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PG_P pg_id: %u\n", pg_shap_cfg_cmd->pg_id); + dev_info(&hdev->pdev->dev, "PG_P pg_shapping: 0x%x\n", + pg_shap_cfg_cmd->pg_shapping_para); + + cmd = HCLGE_OPC_TM_PORT_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + port_shap_cfg_cmd = (struct hclge_port_shapping_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PORT port_shapping: 0x%x\n", + port_shap_cfg_cmd->port_shapping_para); + + cmd = HCLGE_OPC_TM_PG_SCH_MODE_CFG; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + dev_info(&hdev->pdev->dev, "PG_SCH pg_id: %u\n", desc.data[0]); + + cmd = HCLGE_OPC_TM_PRI_SCH_MODE_CFG; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + dev_info(&hdev->pdev->dev, "PRI_SCH pg_id: %u\n", desc.data[0]); + + cmd = HCLGE_OPC_TM_QS_SCH_MODE_CFG; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + dev_info(&hdev->pdev->dev, "QS_SCH pg_id: %u\n", desc.data[0]); + + cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_pg_cmd_send; + + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_id: %u\n", + bp_to_qs_map_cmd->tc_id); + dev_info(&hdev->pdev->dev, "BP_TO_QSET pg_shapping: 0x%x\n", + bp_to_qs_map_cmd->qs_group_id); + dev_info(&hdev->pdev->dev, "BP_TO_QSET qs_bit_map: 0x%x\n", + bp_to_qs_map_cmd->qs_bit_map); + return; + +err_tm_pg_cmd_send: + dev_err(&hdev->pdev->dev, "dump tm_pg fail(0x%x), status is %d\n", + cmd, ret); +} + +static void hclge_dbg_dump_tm(struct hclge_dev *hdev) 
+{ + struct hclge_priority_weight_cmd *priority_weight; + struct hclge_pg_to_pri_link_cmd *pg_to_pri_map; + struct hclge_qs_to_pri_link_cmd *qs_to_pri_map; + struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; + struct hclge_pri_shapping_cmd *shap_cfg_cmd; + struct hclge_pg_weight_cmd *pg_weight; + struct hclge_qs_weight_cmd *qs_weight; + enum hclge_opcode_type cmd; + struct hclge_desc desc; + int ret; + + cmd = HCLGE_OPC_TM_PG_TO_PRI_LINK; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + pg_to_pri_map = (struct hclge_pg_to_pri_link_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "dump tm\n"); + dev_info(&hdev->pdev->dev, "PG_TO_PRI gp_id: %u\n", + pg_to_pri_map->pg_id); + dev_info(&hdev->pdev->dev, "PG_TO_PRI map: 0x%x\n", + pg_to_pri_map->pri_bit_map); + + cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + qs_to_pri_map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "QS_TO_PRI qs_id: %u\n", + qs_to_pri_map->qs_id); + dev_info(&hdev->pdev->dev, "QS_TO_PRI priority: %u\n", + qs_to_pri_map->priority); + dev_info(&hdev->pdev->dev, "QS_TO_PRI link_vld: %u\n", + qs_to_pri_map->link_vld); + + cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "NQ_TO_QS nq_id: %u\n", nq_to_qs_map->nq_id); + dev_info(&hdev->pdev->dev, "NQ_TO_QS qset_id: %u\n", + nq_to_qs_map->qset_id); + + cmd = HCLGE_OPC_TM_PG_WEIGHT; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + pg_weight = (struct hclge_pg_weight_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PG pg_id: %u\n", pg_weight->pg_id); + dev_info(&hdev->pdev->dev, "PG dwrr: %u\n", pg_weight->dwrr); + + cmd = HCLGE_OPC_TM_QS_WEIGHT; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + qs_weight = (struct hclge_qs_weight_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "QS qs_id: %u\n", qs_weight->qs_id); + dev_info(&hdev->pdev->dev, "QS dwrr: %u\n", qs_weight->dwrr); + + cmd = HCLGE_OPC_TM_PRI_WEIGHT; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + priority_weight = (struct hclge_priority_weight_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PRI pri_id: %u\n", priority_weight->pri_id); + dev_info(&hdev->pdev->dev, "PRI dwrr: %u\n", priority_weight->dwrr); + + cmd = HCLGE_OPC_TM_PRI_C_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PRI_C pri_id: %u\n", shap_cfg_cmd->pri_id); + dev_info(&hdev->pdev->dev, "PRI_C pri_shapping: 0x%x\n", + shap_cfg_cmd->pri_shapping_para); + + cmd = HCLGE_OPC_TM_PRI_P_SHAPPING; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_cmd_send; + + shap_cfg_cmd = (struct hclge_pri_shapping_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "PRI_P pri_id: %u\n", shap_cfg_cmd->pri_id); + dev_info(&hdev->pdev->dev, "PRI_P pri_shapping: 0x%x\n", + 
shap_cfg_cmd->pri_shapping_para); + + hclge_dbg_dump_tm_pg(hdev); + + return; + +err_tm_cmd_send: + dev_err(&hdev->pdev->dev, "dump tm fail(0x%x), status is %d\n", + cmd, ret); +} + +static void hclge_dbg_dump_tm_map(struct hclge_dev *hdev, char *cmd_buf) +{ + struct hclge_bp_to_qs_map_cmd *bp_to_qs_map_cmd; + struct hclge_nq_to_qs_link_cmd *nq_to_qs_map; + struct hclge_qs_to_pri_link_cmd *map; + struct hclge_tqp_tx_queue_tc_cmd *tc; + enum hclge_opcode_type cmd; + struct hclge_desc desc; + int queue_id, group_id; + u32 qset_maping[32]; + int tc_id, qset_id; + int pri_id, ret; + u32 i; + + ret = kstrtouint(&cmd_buf[12], 10, &queue_id); + queue_id = (ret != 0) ? 0 : queue_id; + + cmd = HCLGE_OPC_TM_NQ_TO_QS_LINK; + nq_to_qs_map = (struct hclge_nq_to_qs_link_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + nq_to_qs_map->nq_id = cpu_to_le16(queue_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_map_cmd_send; + qset_id = nq_to_qs_map->qset_id & 0x3FF; + + cmd = HCLGE_OPC_TM_QS_TO_PRI_LINK; + map = (struct hclge_qs_to_pri_link_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + map->qs_id = cpu_to_le16(qset_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_map_cmd_send; + pri_id = map->priority; + + cmd = HCLGE_OPC_TQP_TX_QUEUE_TC; + tc = (struct hclge_tqp_tx_queue_tc_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, cmd, true); + tc->queue_id = cpu_to_le16(queue_id); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_map_cmd_send; + tc_id = tc->tc_id & 0x7; + + dev_info(&hdev->pdev->dev, "queue_id | qset_id | pri_id | tc_id\n"); + dev_info(&hdev->pdev->dev, "%04d | %04d | %02d | %02d\n", + queue_id, qset_id, pri_id, tc_id); + + cmd = HCLGE_OPC_TM_BP_TO_QSET_MAPPING; + bp_to_qs_map_cmd = (struct hclge_bp_to_qs_map_cmd *)desc.data; + for (group_id = 0; group_id < 32; group_id++) { + hclge_cmd_setup_basic_desc(&desc, cmd, true); + bp_to_qs_map_cmd->tc_id = tc_id; + bp_to_qs_map_cmd->qs_group_id = group_id; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + goto err_tm_map_cmd_send; + + qset_maping[group_id] = bp_to_qs_map_cmd->qs_bit_map; + } + + dev_info(&hdev->pdev->dev, "index | tm bp qset maping:\n"); + + i = 0; + for (group_id = 0; group_id < 4; group_id++) { + dev_info(&hdev->pdev->dev, + "%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n", + group_id * 256, qset_maping[(u32)(i + 7)], + qset_maping[(u32)(i + 6)], qset_maping[(u32)(i + 5)], + qset_maping[(u32)(i + 4)], qset_maping[(u32)(i + 3)], + qset_maping[(u32)(i + 2)], qset_maping[(u32)(i + 1)], + qset_maping[i]); + i += 8; + } + + return; + +err_tm_map_cmd_send: + dev_err(&hdev->pdev->dev, "dump tqp map fail(0x%x), status is %d\n", + cmd, ret); +} + +static void hclge_dbg_dump_qos_pause_cfg(struct hclge_dev *hdev) +{ + struct hclge_cfg_pause_param_cmd *pause_param; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CFG_MAC_PARA, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, "dump checksum fail, status is %d.\n", + ret); + return; + } + + pause_param = (struct hclge_cfg_pause_param_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "dump qos pause cfg\n"); + dev_info(&hdev->pdev->dev, "pause_trans_gap: 0x%x\n", + pause_param->pause_trans_gap); + dev_info(&hdev->pdev->dev, "pause_trans_time: 0x%x\n", + pause_param->pause_trans_time); +} + +static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev) +{ + struct hclge_qos_pri_map_cmd *pri_map; + 
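The hclge_dbg_dump_tm_map() helper above collects 32 BP-to-qset bitmap words (one HCLGE_OPC_TM_BP_TO_QSET_MAPPING query per qset group) and prints the resulting 1024-bit map as four rows of eight 32-bit words, most-significant word first. A self-contained sketch of just that formatting loop, using a stand-in array in place of the firmware queries (the qset_map contents here are made up):

#include <stdio.h>

int main(void)
{
	/* stand-in for the 1024-bit BP-to-qset bitmap read from firmware */
	unsigned int qset_map[32] = { 0x1 };

	printf("index | bp qset mapping:\n");
	/* each row covers 256 qset bits: 8 words x 32 bits, printed
	 * high word first, matching the driver's layout */
	for (int group = 0, i = 0; group < 4; group++, i += 8)
		printf("%04d | %08x:%08x:%08x:%08x:%08x:%08x:%08x:%08x\n",
		       group * 256,
		       qset_map[i + 7], qset_map[i + 6], qset_map[i + 5],
		       qset_map[i + 4], qset_map[i + 3], qset_map[i + 2],
		       qset_map[i + 1], qset_map[i]);
	return 0;
}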
struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_PRI_TO_TC_MAPPING, true); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "dump qos pri map fail, status is %d.\n", ret); + return; + } + + pri_map = (struct hclge_qos_pri_map_cmd *)desc.data; + dev_info(&hdev->pdev->dev, "dump qos pri map\n"); + dev_info(&hdev->pdev->dev, "vlan_to_pri: 0x%x\n", pri_map->vlan_pri); + dev_info(&hdev->pdev->dev, "pri_0_to_tc: 0x%x\n", pri_map->pri0_tc); + dev_info(&hdev->pdev->dev, "pri_1_to_tc: 0x%x\n", pri_map->pri1_tc); + dev_info(&hdev->pdev->dev, "pri_2_to_tc: 0x%x\n", pri_map->pri2_tc); + dev_info(&hdev->pdev->dev, "pri_3_to_tc: 0x%x\n", pri_map->pri3_tc); + dev_info(&hdev->pdev->dev, "pri_4_to_tc: 0x%x\n", pri_map->pri4_tc); + dev_info(&hdev->pdev->dev, "pri_5_to_tc: 0x%x\n", pri_map->pri5_tc); + dev_info(&hdev->pdev->dev, "pri_6_to_tc: 0x%x\n", pri_map->pri6_tc); + dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc); +} + +static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev) +{ + struct hclge_tx_buff_alloc_cmd *tx_buf_cmd; + struct hclge_rx_priv_buff_cmd *rx_buf_cmd; + struct hclge_rx_priv_wl_buf *rx_priv_wl; + struct hclge_rx_com_wl *rx_packet_cnt; + struct hclge_rx_com_thrd *rx_com_thrd; + struct hclge_rx_com_wl *rx_com_wl; + enum hclge_opcode_type cmd; + struct hclge_desc desc[2]; + int i, ret; + + cmd = HCLGE_OPC_TX_BUFF_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "dump qos buf cfg\n"); + + tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM; i++) + dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i, + tx_buf_cmd->tx_pkt_buff[i]); + + cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "\n"); + rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM; i++) + dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i, + rx_buf_cmd->buf_num[i]); + + dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n", + rx_buf_cmd->shared_buf); + + cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC; + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "\n"); + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i, + rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); + + rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + "rx_priv_wl_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, + rx_priv_wl->tc_wl[i].high, rx_priv_wl->tc_wl[i].low); + + cmd = HCLGE_OPC_RX_COM_THRD_ALLOC; + hclge_cmd_setup_basic_desc(&desc[0], cmd, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 2); + if (ret) + goto err_qos_cmd_send; + + dev_info(&hdev->pdev->dev, "\n"); + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + 
"rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i, + rx_com_thrd->com_thrd[i].high, + rx_com_thrd->com_thrd[i].low); + + rx_com_thrd = (struct hclge_rx_com_thrd *)desc[1].data; + for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++) + dev_info(&hdev->pdev->dev, + "rx_com_thrd_tc_%d: high: 0x%x, low: 0x%x\n", i + 4, + rx_com_thrd->com_thrd[i].high, + rx_com_thrd->com_thrd[i].low); + + cmd = HCLGE_OPC_RX_COM_WL_ALLOC; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, "\n"); + dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n", + rx_com_wl->com_wl.high, rx_com_wl->com_wl.low); + + cmd = HCLGE_OPC_RX_GBL_PKT_CNT; + hclge_cmd_setup_basic_desc(desc, cmd, true); + ret = hclge_cmd_send(&hdev->hw, desc, 1); + if (ret) + goto err_qos_cmd_send; + + rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data; + dev_info(&hdev->pdev->dev, + "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n", + rx_packet_cnt->com_wl.high, rx_packet_cnt->com_wl.low); + + return; + +err_qos_cmd_send: + dev_err(&hdev->pdev->dev, + "dump qos buf cfg fail(0x%x), status is %d\n", cmd, ret); +} + +static void hclge_dbg_dump_mng_table(struct hclge_dev *hdev) +{ + struct hclge_mac_ethertype_idx_rd_cmd *req0; + char printf_buf[HCLGE_DBG_BUF_LEN]; + struct hclge_desc desc; + int ret, i; + + dev_info(&hdev->pdev->dev, "mng tab:\n"); + memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); + strncat(printf_buf, + "entry|mac_addr |mask|ether|mask|vlan|mask", + HCLGE_DBG_BUF_LEN - 1); + strncat(printf_buf + strlen(printf_buf), + "|i_map|i_dir|e_type|pf_id|vf_id|q_id|drop\n", + HCLGE_DBG_BUF_LEN - strlen(printf_buf) - 1); + + dev_info(&hdev->pdev->dev, "%s", printf_buf); + + for (i = 0; i < HCLGE_DBG_MNG_TBL_MAX; i++) { + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_ETHERTYPE_IDX_RD, + true); + req0 = (struct hclge_mac_ethertype_idx_rd_cmd *)&desc.data; + req0->index = cpu_to_le16(i); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) { + dev_err(&hdev->pdev->dev, + "call hclge_cmd_send fail, ret = %d\n", ret); + return; + } + + if (!req0->resp_code) + continue; + + memset(printf_buf, 0, HCLGE_DBG_BUF_LEN); + snprintf(printf_buf, HCLGE_DBG_BUF_LEN, + "%02u |%02x:%02x:%02x:%02x:%02x:%02x|", + req0->index, req0->mac_add[0], req0->mac_add[1], + req0->mac_add[2], req0->mac_add[3], req0->mac_add[4], + req0->mac_add[5]); + + snprintf(printf_buf + strlen(printf_buf), + HCLGE_DBG_BUF_LEN - strlen(printf_buf), + "%x |%04x |%x |%04x|%x |%02x |%02x |", + !!(req0->flags & HCLGE_DBG_MNG_MAC_MASK_B), + req0->ethter_type, + !!(req0->flags & HCLGE_DBG_MNG_ETHER_MASK_B), + req0->vlan_tag & HCLGE_DBG_MNG_VLAN_TAG, + !!(req0->flags & HCLGE_DBG_MNG_VLAN_MASK_B), + req0->i_port_bitmap, req0->i_port_direction); + + snprintf(printf_buf + strlen(printf_buf), + HCLGE_DBG_BUF_LEN - strlen(printf_buf), + "%d |%d |%02d |%04d|%x\n", + !!(req0->egress_port & HCLGE_DBG_MNG_E_TYPE_B), + req0->egress_port & HCLGE_DBG_MNG_PF_ID, + (req0->egress_port >> 3) & HCLGE_DBG_MNG_VF_ID, + req0->egress_queue, + !!(req0->egress_port & HCLGE_DBG_MNG_DROP_B)); + + dev_info(&hdev->pdev->dev, "%s", printf_buf); + } +} + +static void hclge_dbg_fd_tcam_read(struct hclge_dev *hdev, u8 stage, + bool sel_x, u32 loc) +{ + struct hclge_fd_tcam_config_1_cmd *req1; + struct hclge_fd_tcam_config_2_cmd *req2; + struct hclge_fd_tcam_config_3_cmd *req3; + struct hclge_desc desc[3]; + int ret, i; + u32 *req; + + hclge_cmd_setup_basic_desc(&desc[0], 
HCLGE_OPC_FD_TCAM_OP, true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_FD_TCAM_OP, true); + desc[1].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[2], HCLGE_OPC_FD_TCAM_OP, true); + + req1 = (struct hclge_fd_tcam_config_1_cmd *)desc[0].data; + req2 = (struct hclge_fd_tcam_config_2_cmd *)desc[1].data; + req3 = (struct hclge_fd_tcam_config_3_cmd *)desc[2].data; + + req1->stage = stage; + req1->xy_sel = sel_x ? 1 : 0; + req1->index = cpu_to_le32(loc); + + ret = hclge_cmd_send(&hdev->hw, desc, 3); + if (ret) + return; + + dev_info(&hdev->pdev->dev, " read result tcam key %s(%u):\n", + sel_x ? "x" : "y", loc); + + req = (u32 *)req1->tcam_data; + for (i = 0; i < 2; i++) + dev_info(&hdev->pdev->dev, "%08x\n", *req++); + + req = (u32 *)req2->tcam_data; + for (i = 0; i < 6; i++) + dev_info(&hdev->pdev->dev, "%08x\n", *req++); + + req = (u32 *)req3->tcam_data; + for (i = 0; i < 5; i++) + dev_info(&hdev->pdev->dev, "%08x\n", *req++); +} + +static void hclge_dbg_fd_tcam(struct hclge_dev *hdev) +{ + u32 i; + + for (i = 0; i < hdev->fd_cfg.rule_num[0]; i++) { + hclge_dbg_fd_tcam_read(hdev, 0, true, i); + hclge_dbg_fd_tcam_read(hdev, 0, false, i); + } +} + +int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + if (strncmp(cmd_buf, "dump fd tcam", 12) == 0) { + hclge_dbg_fd_tcam(hdev); + } else if (strncmp(cmd_buf, "dump tc", 7) == 0) { + hclge_dbg_dump_tc(hdev); + } else if (strncmp(cmd_buf, "dump tm map", 11) == 0) { + hclge_dbg_dump_tm_map(hdev, cmd_buf); + } else if (strncmp(cmd_buf, "dump tm", 7) == 0) { + hclge_dbg_dump_tm(hdev); + } else if (strncmp(cmd_buf, "dump qos pause cfg", 18) == 0) { + hclge_dbg_dump_qos_pause_cfg(hdev); + } else if (strncmp(cmd_buf, "dump qos pri map", 16) == 0) { + hclge_dbg_dump_qos_pri_map(hdev); + } else if (strncmp(cmd_buf, "dump qos buf cfg", 16) == 0) { + hclge_dbg_dump_qos_buf_cfg(hdev); + } else if (strncmp(cmd_buf, "dump mng tbl", 12) == 0) { + hclge_dbg_dump_mng_table(hdev); + } else if (strncmp(cmd_buf, "dump reg", 8) == 0) { + hclge_dbg_dump_reg_cmd(hdev, cmd_buf); + } else { + dev_info(&hdev->pdev->dev, "unknown command\n"); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h new file mode 100644 index 000000000000..d055fda41775 --- /dev/null +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.h @@ -0,0 +1,713 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018-2019 Hisilicon Limited. 
*/ + +#ifndef __HCLGE_DEBUGFS_H +#define __HCLGE_DEBUGFS_H + +#define HCLGE_DBG_BUF_LEN 256 +#define HCLGE_DBG_MNG_TBL_MAX 64 + +#define HCLGE_DBG_MNG_VLAN_MASK_B BIT(0) +#define HCLGE_DBG_MNG_MAC_MASK_B BIT(1) +#define HCLGE_DBG_MNG_ETHER_MASK_B BIT(2) +#define HCLGE_DBG_MNG_E_TYPE_B BIT(11) +#define HCLGE_DBG_MNG_DROP_B BIT(13) +#define HCLGE_DBG_MNG_VLAN_TAG 0x0FFF +#define HCLGE_DBG_MNG_PF_ID 0x0007 +#define HCLGE_DBG_MNG_VF_ID 0x00FF + +/* Get DFX BD number offset */ +#define HCLGE_DBG_DFX_BIOS_OFFSET 1 +#define HCLGE_DBG_DFX_SSU_0_OFFSET 2 +#define HCLGE_DBG_DFX_SSU_1_OFFSET 3 +#define HCLGE_DBG_DFX_IGU_OFFSET 4 +#define HCLGE_DBG_DFX_RPU_0_OFFSET 5 + +#define HCLGE_DBG_DFX_RPU_1_OFFSET 6 +#define HCLGE_DBG_DFX_NCSI_OFFSET 7 +#define HCLGE_DBG_DFX_RTC_OFFSET 8 +#define HCLGE_DBG_DFX_PPP_OFFSET 9 +#define HCLGE_DBG_DFX_RCB_OFFSET 10 +#define HCLGE_DBG_DFX_TQP_OFFSET 11 + +#define HCLGE_DBG_DFX_SSU_2_OFFSET 12 + +#pragma pack(1) + +struct hclge_qos_pri_map_cmd { + u8 pri0_tc : 4, + pri1_tc : 4; + u8 pri2_tc : 4, + pri3_tc : 4; + u8 pri4_tc : 4, + pri5_tc : 4; + u8 pri6_tc : 4, + pri7_tc : 4; + u8 vlan_pri : 4, + rev : 4; +}; + +struct hclge_dbg_bitmap_cmd { + union { + u8 bitmap; + struct { + u8 bit0 : 1, + bit1 : 1, + bit2 : 1, + bit3 : 1, + bit4 : 1, + bit5 : 1, + bit6 : 1, + bit7 : 1; + }; + }; +}; + +struct hclge_dbg_dfx_message { + int flag; + char message[60]; +}; + +#pragma pack() + +static struct hclge_dbg_dfx_message hclge_dbg_bios_common_reg[] = { + {false, "Reserved"}, + {true, "BP_CPU_STATE"}, + {true, "DFX_MSIX_INFO_NIC_0"}, + {true, "DFX_MSIX_INFO_NIC_1"}, + {true, "DFX_MSIX_INFO_NIC_2"}, + {true, "DFX_MSIX_INFO_NIC_3"}, + + {true, "DFX_MSIX_INFO_ROC_0"}, + {true, "DFX_MSIX_INFO_ROC_1"}, + {true, "DFX_MSIX_INFO_ROC_2"}, + {true, "DFX_MSIX_INFO_ROC_3"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_0[] = { + {false, "Reserved"}, + {true, "SSU_ETS_PORT_STATUS"}, + {true, "SSU_ETS_TCG_STATUS"}, + {false, "Reserved"}, + {false, "Reserved"}, + {true, "SSU_BP_STATUS_0"}, + + {true, "SSU_BP_STATUS_1"}, + {true, "SSU_BP_STATUS_2"}, + {true, "SSU_BP_STATUS_3"}, + {true, "SSU_BP_STATUS_4"}, + {true, "SSU_BP_STATUS_5"}, + {true, "SSU_MAC_TX_PFC_IND"}, + + {true, "MAC_SSU_RX_PFC_IND"}, + {true, "BTMP_AGEING_ST_B0"}, + {true, "BTMP_AGEING_ST_B1"}, + {true, "BTMP_AGEING_ST_B2"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "FULL_DROP_NUM"}, + {true, "PART_DROP_NUM"}, + {true, "PPP_KEY_DROP_NUM"}, + {true, "PPP_RLT_DROP_NUM"}, + {true, "LO_PRI_UNICAST_RLT_DROP_NUM"}, + {true, "HI_PRI_MULTICAST_RLT_DROP_NUM"}, + + {true, "LO_PRI_MULTICAST_RLT_DROP_NUM"}, + {true, "NCSI_PACKET_CURR_BUFFER_CNT"}, + {true, "BTMP_AGEING_RLS_CNT_BANK0"}, + {true, "BTMP_AGEING_RLS_CNT_BANK1"}, + {true, "BTMP_AGEING_RLS_CNT_BANK2"}, + {true, "SSU_MB_RD_RLT_DROP_CNT"}, + + {true, "SSU_PPP_MAC_KEY_NUM_L"}, + {true, "SSU_PPP_MAC_KEY_NUM_H"}, + {true, "SSU_PPP_HOST_KEY_NUM_L"}, + {true, "SSU_PPP_HOST_KEY_NUM_H"}, + {true, "PPP_SSU_MAC_RLT_NUM_L"}, + {true, "PPP_SSU_MAC_RLT_NUM_H"}, + + {true, "PPP_SSU_HOST_RLT_NUM_L"}, + {true, "PPP_SSU_HOST_RLT_NUM_H"}, + {true, "NCSI_RX_PACKET_IN_CNT_L"}, + {true, "NCSI_RX_PACKET_IN_CNT_H"}, + {true, "NCSI_TX_PACKET_OUT_CNT_L"}, + {true, "NCSI_TX_PACKET_OUT_CNT_H"}, + + {true, "SSU_KEY_DROP_NUM"}, + {true, "MB_UNCOPY_NUM"}, + {true, "RX_OQ_DROP_PKT_CNT"}, + {true, "TX_OQ_DROP_PKT_CNT"}, + {true, "BANK_UNBALANCE_DROP_CNT"}, + {true, "BANK_UNBALANCE_RX_DROP_CNT"}, + + {true, "NIC_L2_ERR_DROP_PKT_CNT"}, + 
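These hclge_dbg_dfx_message tables pair a validity flag with a register name; hclge_dbg_dump_reg_common() walks a table alongside the descriptor data returned by firmware and prints only the entries whose flag is set, skipping the reserved slots (in the driver, the walk also advances to the next descriptor every six data words). A minimal sketch of that flag-driven walk, with a made-up three-entry table and fake register words in place of the command descriptors:

#include <stdio.h>
#include <stddef.h>
#include <stdbool.h>

struct dfx_message {    /* mirrors struct hclge_dbg_dfx_message */
	bool flag;      /* false marks a reserved slot that is skipped */
	char message[60];
};

int main(void)
{
	/* hypothetical table and register values for illustration only */
	static const struct dfx_message regs[] = {
		{ false, "Reserved" },
		{ true,  "BP_CPU_STATE" },
		{ true,  "DFX_MSIX_INFO_NIC_0" },
	};
	unsigned int data[] = { 0x0, 0x12345678, 0xdeadbeef };

	/* print only the named (non-reserved) registers, as the
	 * driver does with desc->data[] */
	for (size_t i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		if (regs[i].flag)
			printf("%s: 0x%x\n", regs[i].message, data[i]);
	return 0;
}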
{true, "ROC_L2_ERR_DROP_PKT_CNT"}, + {true, "NIC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "ROC_L2_ERR_DROP_PKT_CNT_RX"}, + {true, "RX_OQ_GLB_DROP_PKT_CNT"}, + {false, "Reserved"}, + + {true, "LO_PRI_UNICAST_CUR_CNT"}, + {true, "HI_PRI_MULTICAST_CUR_CNT"}, + {true, "LO_PRI_MULTICAST_CUR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_1[] = { + {true, "prt_id"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_0"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_1"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_2"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_3"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_4"}, + + {true, "PACKET_TC_CURR_BUFFER_CNT_5"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_6"}, + {true, "PACKET_TC_CURR_BUFFER_CNT_7"}, + {true, "PACKET_CURR_BUFFER_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "RX_PACKET_IN_CNT_L"}, + {true, "RX_PACKET_IN_CNT_H"}, + {true, "RX_PACKET_OUT_CNT_L"}, + {true, "RX_PACKET_OUT_CNT_H"}, + {true, "TX_PACKET_IN_CNT_L"}, + {true, "TX_PACKET_IN_CNT_H"}, + + {true, "TX_PACKET_OUT_CNT_L"}, + {true, "TX_PACKET_OUT_CNT_H"}, + {true, "ROC_RX_PACKET_IN_CNT_L"}, + {true, "ROC_RX_PACKET_IN_CNT_H"}, + {true, "ROC_TX_PACKET_OUT_CNT_L"}, + {true, "ROC_TX_PACKET_OUT_CNT_H"}, + + {true, "RX_PACKET_TC_IN_CNT_0_L"}, + {true, "RX_PACKET_TC_IN_CNT_0_H"}, + {true, "RX_PACKET_TC_IN_CNT_1_L"}, + {true, "RX_PACKET_TC_IN_CNT_1_H"}, + {true, "RX_PACKET_TC_IN_CNT_2_L"}, + {true, "RX_PACKET_TC_IN_CNT_2_H"}, + + {true, "RX_PACKET_TC_IN_CNT_3_L"}, + {true, "RX_PACKET_TC_IN_CNT_3_H"}, + {true, "RX_PACKET_TC_IN_CNT_4_L"}, + {true, "RX_PACKET_TC_IN_CNT_4_H"}, + {true, "RX_PACKET_TC_IN_CNT_5_L"}, + {true, "RX_PACKET_TC_IN_CNT_5_H"}, + + {true, "RX_PACKET_TC_IN_CNT_6_L"}, + {true, "RX_PACKET_TC_IN_CNT_6_H"}, + {true, "RX_PACKET_TC_IN_CNT_7_L"}, + {true, "RX_PACKET_TC_IN_CNT_7_H"}, + {true, "RX_PACKET_TC_OUT_CNT_0_L"}, + {true, "RX_PACKET_TC_OUT_CNT_0_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_1_L"}, + {true, "RX_PACKET_TC_OUT_CNT_1_H"}, + {true, "RX_PACKET_TC_OUT_CNT_2_L"}, + {true, "RX_PACKET_TC_OUT_CNT_2_H"}, + {true, "RX_PACKET_TC_OUT_CNT_3_L"}, + {true, "RX_PACKET_TC_OUT_CNT_3_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_4_L"}, + {true, "RX_PACKET_TC_OUT_CNT_4_H"}, + {true, "RX_PACKET_TC_OUT_CNT_5_L"}, + {true, "RX_PACKET_TC_OUT_CNT_5_H"}, + {true, "RX_PACKET_TC_OUT_CNT_6_L"}, + {true, "RX_PACKET_TC_OUT_CNT_6_H"}, + + {true, "RX_PACKET_TC_OUT_CNT_7_L"}, + {true, "RX_PACKET_TC_OUT_CNT_7_H"}, + {true, "TX_PACKET_TC_IN_CNT_0_L"}, + {true, "TX_PACKET_TC_IN_CNT_0_H"}, + {true, "TX_PACKET_TC_IN_CNT_1_L"}, + {true, "TX_PACKET_TC_IN_CNT_1_H"}, + + {true, "TX_PACKET_TC_IN_CNT_2_L"}, + {true, "TX_PACKET_TC_IN_CNT_2_H"}, + {true, "TX_PACKET_TC_IN_CNT_3_L"}, + {true, "TX_PACKET_TC_IN_CNT_3_H"}, + {true, "TX_PACKET_TC_IN_CNT_4_L"}, + {true, "TX_PACKET_TC_IN_CNT_4_H"}, + + {true, "TX_PACKET_TC_IN_CNT_5_L"}, + {true, "TX_PACKET_TC_IN_CNT_5_H"}, + {true, "TX_PACKET_TC_IN_CNT_6_L"}, + {true, "TX_PACKET_TC_IN_CNT_6_H"}, + {true, "TX_PACKET_TC_IN_CNT_7_L"}, + {true, "TX_PACKET_TC_IN_CNT_7_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_0_L"}, + {true, "TX_PACKET_TC_OUT_CNT_0_H"}, + {true, "TX_PACKET_TC_OUT_CNT_1_L"}, + {true, "TX_PACKET_TC_OUT_CNT_1_H"}, + {true, "TX_PACKET_TC_OUT_CNT_2_L"}, + {true, "TX_PACKET_TC_OUT_CNT_2_H"}, + + {true, "TX_PACKET_TC_OUT_CNT_3_L"}, + {true, "TX_PACKET_TC_OUT_CNT_3_H"}, + {true, "TX_PACKET_TC_OUT_CNT_4_L"}, + {true, "TX_PACKET_TC_OUT_CNT_4_H"}, + {true, "TX_PACKET_TC_OUT_CNT_5_L"}, + {true, "TX_PACKET_TC_OUT_CNT_5_H"}, + + {true, 
"TX_PACKET_TC_OUT_CNT_6_L"}, + {true, "TX_PACKET_TC_OUT_CNT_6_H"}, + {true, "TX_PACKET_TC_OUT_CNT_7_L"}, + {true, "TX_PACKET_TC_OUT_CNT_7_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ssu_reg_2[] = { + {true, "OQ_INDEX"}, + {true, "QUEUE_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_igu_egu_reg[] = { + {true, "prt_id"}, + {true, "IGU_RX_ERR_PKT"}, + {true, "IGU_RX_NO_SOF_PKT"}, + {true, "EGU_TX_1588_SHORT_PKT"}, + {true, "EGU_TX_1588_PKT"}, + {true, "EGU_TX_ERR_PKT"}, + + {true, "IGU_RX_OUT_L2_PKT"}, + {true, "IGU_RX_OUT_L3_PKT"}, + {true, "IGU_RX_OUT_L4_PKT"}, + {true, "IGU_RX_IN_L2_PKT"}, + {true, "IGU_RX_IN_L3_PKT"}, + {true, "IGU_RX_IN_L4_PKT"}, + + {true, "IGU_RX_EL3E_PKT"}, + {true, "IGU_RX_EL4E_PKT"}, + {true, "IGU_RX_L3E_PKT"}, + {true, "IGU_RX_L4E_PKT"}, + {true, "IGU_RX_ROCEE_PKT"}, + {true, "IGU_RX_OUT_UDP0_PKT"}, + + {true, "IGU_RX_IN_UDP0_PKT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "IGU_RX_OVERSIZE_PKT_L"}, + {true, "IGU_RX_OVERSIZE_PKT_H"}, + {true, "IGU_RX_UNDERSIZE_PKT_L"}, + {true, "IGU_RX_UNDERSIZE_PKT_H"}, + {true, "IGU_RX_OUT_ALL_PKT_L"}, + {true, "IGU_RX_OUT_ALL_PKT_H"}, + + {true, "IGU_TX_OUT_ALL_PKT_L"}, + {true, "IGU_TX_OUT_ALL_PKT_H"}, + {true, "IGU_RX_UNI_PKT_L"}, + {true, "IGU_RX_UNI_PKT_H"}, + {true, "IGU_RX_MULTI_PKT_L"}, + {true, "IGU_RX_MULTI_PKT_H"}, + + {true, "IGU_RX_BROAD_PKT_L"}, + {true, "IGU_RX_BROAD_PKT_H"}, + {true, "EGU_TX_OUT_ALL_PKT_L"}, + {true, "EGU_TX_OUT_ALL_PKT_H"}, + {true, "EGU_TX_UNI_PKT_L"}, + {true, "EGU_TX_UNI_PKT_H"}, + + {true, "EGU_TX_MULTI_PKT_L"}, + {true, "EGU_TX_MULTI_PKT_H"}, + {true, "EGU_TX_BROAD_PKT_L"}, + {true, "EGU_TX_BROAD_PKT_H"}, + {true, "IGU_TX_KEY_NUM_L"}, + {true, "IGU_TX_KEY_NUM_H"}, + + {true, "IGU_RX_NON_TUN_PKT_L"}, + {true, "IGU_RX_NON_TUN_PKT_H"}, + {true, "IGU_RX_TUN_PKT_L"}, + {true, "IGU_RX_TUN_PKT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_0[] = { + {true, "tc_queue_num"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "RPU_RX_PKT_DROP_CNT"}, + {true, "BUF_WAIT_TIMEOUT"}, + {true, "BUF_WAIT_TIMEOUT_QID"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rpu_reg_1[] = { + {false, "Reserved"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + + {true, "FIFO_DFX_ST5"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ncsi_reg[] = { + {false, "Reserved"}, + {true, "NCSI_EGU_TX_FIFO_STS"}, + {true, "NCSI_PAUSE_STATUS"}, + {true, "NCSI_RX_CTRL_DMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_RX_CTRL_CKS_ERR_CNT"}, + + {true, "NCSI_RX_CTRL_PKT_CNT"}, + {true, "NCSI_RX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_RX_PT_PKT_CNT"}, + {true, "NCSI_RX_FCS_ERR_CNT"}, + {true, "NCSI_TX_CTRL_DMAC_ERR_CNT"}, + + {true, "NCSI_TX_CTRL_SMAC_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_CNT"}, + {true, "NCSI_TX_PT_DMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_SMAC_ERR_CNT"}, + {true, "NCSI_TX_PT_PKT_CNT"}, + {true, "NCSI_TX_PT_PKT_TRUNC_CNT"}, + + {true, "NCSI_TX_PT_PKT_ERR_CNT"}, + {true, "NCSI_TX_CTRL_PKT_ERR_CNT"}, + {true, "NCSI_RX_CTRL_PKT_TRUNC_CNT"}, + {true, 
"NCSI_RX_CTRL_PKT_CFLIT_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "NCSI_MAC_RX_OCTETS_OK"}, + {true, "NCSI_MAC_RX_OCTETS_BAD"}, + {true, "NCSI_MAC_RX_UC_PKTS"}, + {true, "NCSI_MAC_RX_MC_PKTS"}, + {true, "NCSI_MAC_RX_BC_PKTS"}, + {true, "NCSI_MAC_RX_PKTS_64OCTETS"}, + + {true, "NCSI_MAC_RX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_255TO511OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1024TO1518OCTETS"}, + {true, "NCSI_MAC_RX_PKTS_1519TOMAXOCTETS"}, + + {true, "NCSI_MAC_RX_FCS_ERRORS"}, + {true, "NCSI_MAC_RX_LONG_ERRORS"}, + {true, "NCSI_MAC_RX_JABBER_ERRORS"}, + {true, "NCSI_MAC_RX_RUNT_ERR_CNT"}, + {true, "NCSI_MAC_RX_SHORT_ERR_CNT"}, + {true, "NCSI_MAC_RX_FILT_PKT_CNT"}, + + {true, "NCSI_MAC_RX_OCTETS_TOTAL_FILT"}, + {true, "NCSI_MAC_TX_OCTETS_OK"}, + {true, "NCSI_MAC_TX_OCTETS_BAD"}, + {true, "NCSI_MAC_TX_UC_PKTS"}, + {true, "NCSI_MAC_TX_MC_PKTS"}, + {true, "NCSI_MAC_TX_BC_PKTS"}, + + {true, "NCSI_MAC_TX_PKTS_64OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_65TO127OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_128TO255OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_256TO511OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_512TO1023OCTETS"}, + {true, "NCSI_MAC_TX_PKTS_1024TO1518OCTETS"}, + + {true, "NCSI_MAC_TX_PKTS_1519TOMAXOCTETS"}, + {true, "NCSI_MAC_TX_UNDERRUN"}, + {true, "NCSI_MAC_TX_CRC_ERROR"}, + {true, "NCSI_MAC_TX_PAUSE_FRAMES"}, + {true, "NCSI_MAC_RX_PAD_PKTS"}, + {true, "NCSI_MAC_RX_PAUSE_FRAMES"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rtc_reg[] = { + {false, "Reserved"}, + {true, "LGE_IGU_AFIFO_DFX_0"}, + {true, "LGE_IGU_AFIFO_DFX_1"}, + {true, "LGE_IGU_AFIFO_DFX_2"}, + {true, "LGE_IGU_AFIFO_DFX_3"}, + {true, "LGE_IGU_AFIFO_DFX_4"}, + + {true, "LGE_IGU_AFIFO_DFX_5"}, + {true, "LGE_IGU_AFIFO_DFX_6"}, + {true, "LGE_IGU_AFIFO_DFX_7"}, + {true, "LGE_EGU_AFIFO_DFX_0"}, + {true, "LGE_EGU_AFIFO_DFX_1"}, + {true, "LGE_EGU_AFIFO_DFX_2"}, + + {true, "LGE_EGU_AFIFO_DFX_3"}, + {true, "LGE_EGU_AFIFO_DFX_4"}, + {true, "LGE_EGU_AFIFO_DFX_5"}, + {true, "LGE_EGU_AFIFO_DFX_6"}, + {true, "LGE_EGU_AFIFO_DFX_7"}, + {true, "CGE_IGU_AFIFO_DFX_0"}, + + {true, "CGE_IGU_AFIFO_DFX_1"}, + {true, "CGE_EGU_AFIFO_DFX_0"}, + {true, "CGE_EGU_AFIFO_DFX_1"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_ppp_reg[] = { + {false, "Reserved"}, + {true, "DROP_FROM_PRT_PKT_CNT"}, + {true, "DROP_FROM_HOST_PKT_CNT"}, + {true, "DROP_TX_VLAN_PROC_CNT"}, + {true, "DROP_MNG_CNT"}, + {true, "DROP_FD_CNT"}, + + {true, "DROP_NO_DST_CNT"}, + {true, "DROP_MC_MBID_FULL_CNT"}, + {true, "DROP_SC_FILTERED"}, + {true, "PPP_MC_DROP_PKT_CNT"}, + {true, "DROP_PT_CNT"}, + {true, "DROP_MAC_ANTI_SPOOF_CNT"}, + + {true, "DROP_IG_VFV_CNT"}, + {true, "DROP_IG_PRTV_CNT"}, + {true, "DROP_CNM_PFC_PAUSE_CNT"}, + {true, "DROP_TORUS_TC_CNT"}, + {true, "DROP_TORUS_LPBK_CNT"}, + {true, "PPP_HFS_STS"}, + + {true, "PPP_MC_RSLT_STS"}, + {true, "PPP_P3U_STS"}, + {true, "PPP_RSLT_DESCR_STS"}, + {true, "PPP_UMV_STS_0"}, + {true, "PPP_UMV_STS_1"}, + {true, "PPP_VFV_STS"}, + + {true, "PPP_GRO_KEY_CNT"}, + {true, "PPP_GRO_INFO_CNT"}, + {true, "PPP_GRO_DROP_CNT"}, + {true, "PPP_GRO_OUT_CNT"}, + {true, "PPP_GRO_KEY_MATCH_DATA_CNT"}, + {true, "PPP_GRO_KEY_MATCH_TCAM_CNT"}, + + {true, "PPP_GRO_INFO_MATCH_CNT"}, + {true, "PPP_GRO_FREE_ENTRY_CNT"}, + {true, "PPP_GRO_INNER_DFX_SIGNAL"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + + {true, "GET_RX_PKT_CNT_L"}, + {true, 
"GET_RX_PKT_CNT_H"}, + {true, "GET_TX_PKT_CNT_L"}, + {true, "GET_TX_PKT_CNT_H"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_L"}, + {true, "SEND_UC_PRT2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2PRT_PKT_CNT_L"}, + {true, "SEND_UC_PRT2PRT_PKT_CNT_H"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_L"}, + {true, "SEND_UC_HOST2HOST_PKT_CNT_H"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_L"}, + {true, "SEND_UC_HOST2PRT_PKT_CNT_H"}, + + {true, "SEND_MC_FROM_PRT_CNT_L"}, + {true, "SEND_MC_FROM_PRT_CNT_H"}, + {true, "SEND_MC_FROM_HOST_CNT_L"}, + {true, "SEND_MC_FROM_HOST_CNT_H"}, + {true, "SSU_MC_RD_CNT_L"}, + {true, "SSU_MC_RD_CNT_H"}, + + {true, "SSU_MC_DROP_CNT_L"}, + {true, "SSU_MC_DROP_CNT_H"}, + {true, "SSU_MC_RD_PKT_CNT_L"}, + {true, "SSU_MC_RD_PKT_CNT_H"}, + {true, "PPP_MC_2HOST_PKT_CNT_L"}, + {true, "PPP_MC_2HOST_PKT_CNT_H"}, + + {true, "PPP_MC_2PRT_PKT_CNT_L"}, + {true, "PPP_MC_2PRT_PKT_CNT_H"}, + {true, "NTSNOS_PKT_CNT_L"}, + {true, "NTSNOS_PKT_CNT_H"}, + {true, "NTUP_PKT_CNT_L"}, + {true, "NTUP_PKT_CNT_H"}, + + {true, "NTLCL_PKT_CNT_L"}, + {true, "NTLCL_PKT_CNT_H"}, + {true, "NTTGT_PKT_CNT_L"}, + {true, "NTTGT_PKT_CNT_H"}, + {true, "RTNS_PKT_CNT_L"}, + {true, "RTNS_PKT_CNT_H"}, + + {true, "RTLPBK_PKT_CNT_L"}, + {true, "RTLPBK_PKT_CNT_H"}, + {true, "NR_PKT_CNT_L"}, + {true, "NR_PKT_CNT_H"}, + {true, "RR_PKT_CNT_L"}, + {true, "RR_PKT_CNT_H"}, + + {true, "MNG_TBL_HIT_CNT_L"}, + {true, "MNG_TBL_HIT_CNT_H"}, + {true, "FD_TBL_HIT_CNT_L"}, + {true, "FD_TBL_HIT_CNT_H"}, + {true, "FD_LKUP_CNT_L"}, + {true, "FD_LKUP_CNT_H"}, + + {true, "BC_HIT_CNT_L"}, + {true, "BC_HIT_CNT_H"}, + {true, "UM_TBL_UC_HIT_CNT_L"}, + {true, "UM_TBL_UC_HIT_CNT_H"}, + {true, "UM_TBL_MC_HIT_CNT_L"}, + {true, "UM_TBL_MC_HIT_CNT_H"}, + + {true, "UM_TBL_VMDQ1_HIT_CNT_L"}, + {true, "UM_TBL_VMDQ1_HIT_CNT_H"}, + {true, "MTA_TBL_HIT_CNT_L"}, + {true, "MTA_TBL_HIT_CNT_H"}, + {true, "FWD_BONDING_HIT_CNT_L"}, + {true, "FWD_BONDING_HIT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_CNT_L"}, + {true, "PROMIS_TBL_HIT_CNT_H"}, + {true, "GET_TUNL_PKT_CNT_L"}, + {true, "GET_TUNL_PKT_CNT_H"}, + {true, "GET_BMC_PKT_CNT_L"}, + {true, "GET_BMC_PKT_CNT_H"}, + + {true, "SEND_UC_PRT2BMC_PKT_CNT_L"}, + {true, "SEND_UC_PRT2BMC_PKT_CNT_H"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_L"}, + {true, "SEND_UC_HOST2BMC_PKT_CNT_H"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_L"}, + {true, "SEND_UC_BMC2HOST_PKT_CNT_H"}, + + {true, "SEND_UC_BMC2PRT_PKT_CNT_L"}, + {true, "SEND_UC_BMC2PRT_PKT_CNT_H"}, + {true, "PPP_MC_2BMC_PKT_CNT_L"}, + {true, "PPP_MC_2BMC_PKT_CNT_H"}, + {true, "VLAN_MIRR_CNT_L"}, + {true, "VLAN_MIRR_CNT_H"}, + + {true, "IG_MIRR_CNT_L"}, + {true, "IG_MIRR_CNT_H"}, + {true, "EG_MIRR_CNT_L"}, + {true, "EG_MIRR_CNT_H"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_L"}, + {true, "RX_DEFAULT_HOST_HIT_CNT_H"}, + + {true, "LAN_PAIR_CNT_L"}, + {true, "LAN_PAIR_CNT_H"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_L"}, + {true, "UM_TBL_MC_HIT_PKT_CNT_H"}, + {true, "MTA_TBL_HIT_PKT_CNT_L"}, + {true, "MTA_TBL_HIT_PKT_CNT_H"}, + + {true, "PROMIS_TBL_HIT_PKT_CNT_L"}, + {true, "PROMIS_TBL_HIT_PKT_CNT_H"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_rcb_reg[] = { + {false, "Reserved"}, + {true, "FSM_DFX_ST0"}, + {true, "FSM_DFX_ST1"}, + {true, "FSM_DFX_ST2"}, + {true, "FIFO_DFX_ST0"}, + {true, "FIFO_DFX_ST1"}, + + {true, "FIFO_DFX_ST2"}, + {true, "FIFO_DFX_ST3"}, + {true, "FIFO_DFX_ST4"}, + {true, "FIFO_DFX_ST5"}, + {true, "FIFO_DFX_ST6"}, + {true, "FIFO_DFX_ST7"}, + + {true, "FIFO_DFX_ST8"}, + {true, "FIFO_DFX_ST9"}, + {true, 
"FIFO_DFX_ST10"}, + {true, "FIFO_DFX_ST11"}, + {true, "Q_CREDIT_VLD_0"}, + {true, "Q_CREDIT_VLD_1"}, + + {true, "Q_CREDIT_VLD_2"}, + {true, "Q_CREDIT_VLD_3"}, + {true, "Q_CREDIT_VLD_4"}, + {true, "Q_CREDIT_VLD_5"}, + {true, "Q_CREDIT_VLD_6"}, + {true, "Q_CREDIT_VLD_7"}, + + {true, "Q_CREDIT_VLD_8"}, + {true, "Q_CREDIT_VLD_9"}, + {true, "Q_CREDIT_VLD_10"}, + {true, "Q_CREDIT_VLD_11"}, + {true, "Q_CREDIT_VLD_12"}, + {true, "Q_CREDIT_VLD_13"}, + + {true, "Q_CREDIT_VLD_14"}, + {true, "Q_CREDIT_VLD_15"}, + {true, "Q_CREDIT_VLD_16"}, + {true, "Q_CREDIT_VLD_17"}, + {true, "Q_CREDIT_VLD_18"}, + {true, "Q_CREDIT_VLD_19"}, + + {true, "Q_CREDIT_VLD_20"}, + {true, "Q_CREDIT_VLD_21"}, + {true, "Q_CREDIT_VLD_22"}, + {true, "Q_CREDIT_VLD_23"}, + {true, "Q_CREDIT_VLD_24"}, + {true, "Q_CREDIT_VLD_25"}, + + {true, "Q_CREDIT_VLD_26"}, + {true, "Q_CREDIT_VLD_27"}, + {true, "Q_CREDIT_VLD_28"}, + {true, "Q_CREDIT_VLD_29"}, + {true, "Q_CREDIT_VLD_30"}, + {true, "Q_CREDIT_VLD_31"}, + + {true, "GRO_BD_SERR_CNT"}, + {true, "GRO_CONTEXT_SERR_CNT"}, + {true, "RX_STASH_CFG_SERR_CNT"}, + {true, "AXI_RD_FBD_SERR_CNT"}, + {true, "GRO_BD_MERR_CNT"}, + {true, "GRO_CONTEXT_MERR_CNT"}, + + {true, "RX_STASH_CFG_MERR_CNT"}, + {true, "AXI_RD_FBD_MERR_CNT"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, + {false, "Reserved"}, +}; + +static struct hclge_dbg_dfx_message hclge_dbg_tqp_reg[] = { + {true, "q_num"}, + {true, "RCB_CFG_RX_RING_TAIL"}, + {true, "RCB_CFG_RX_RING_HEAD"}, + {true, "RCB_CFG_RX_RING_FBDNUM"}, + {true, "RCB_CFG_RX_RING_OFFSET"}, + {true, "RCB_CFG_RX_RING_FBDOFFSET"}, + + {true, "RCB_CFG_RX_RING_PKTNUM_RECORD"}, + {true, "RCB_CFG_TX_RING_TAIL"}, + {true, "RCB_CFG_TX_RING_HEAD"}, + {true, "RCB_CFG_TX_RING_FBDNUM"}, + {true, "RCB_CFG_TX_RING_OFFSET"}, + {true, "RCB_CFG_TX_RING_EBDNUM"}, +}; + +#endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c index 123c37e653f3..d0f654123b9b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.c @@ -4,78 +4,39 @@ #include "hclge_err.h" static const struct hclge_hw_error hclge_imp_tcm_ecc_int[] = { - { .int_msk = BIT(0), .msg = "imp_itcm0_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "imp_itcm0_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "imp_itcm1_ecc_1bit_err" }, { .int_msk = BIT(3), .msg = "imp_itcm1_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "imp_itcm2_ecc_1bit_err" }, { .int_msk = BIT(5), .msg = "imp_itcm2_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "imp_itcm3_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "imp_itcm3_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "imp_dtcm0_mem0_ecc_1bit_err" }, { .int_msk = BIT(9), .msg = "imp_dtcm0_mem0_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "imp_dtcm0_mem1_ecc_1bit_err" }, { .int_msk = BIT(11), .msg = "imp_dtcm0_mem1_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "imp_dtcm1_mem0_ecc_1bit_err" }, { .int_msk = BIT(13), .msg = "imp_dtcm1_mem0_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "imp_dtcm1_mem1_ecc_1bit_err" }, { .int_msk = BIT(15), .msg = "imp_dtcm1_mem1_ecc_mbit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_imp_itcm4_ecc_int[] = { - { .int_msk = BIT(0), .msg = "imp_itcm4_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "imp_itcm4_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "imp_itcm4_ecc_mbit_err" }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_cmdq_nic_mem_ecc_int[] = { - { .int_msk = 
BIT(0), .msg = "cmdq_nic_rx_depth_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "cmdq_nic_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "cmdq_nic_tx_depth_ecc_1bit_err" }, { .int_msk = BIT(3), .msg = "cmdq_nic_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "cmdq_nic_rx_tail_ecc_1bit_err" }, { .int_msk = BIT(5), .msg = "cmdq_nic_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "cmdq_nic_tx_tail_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "cmdq_nic_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "cmdq_nic_rx_head_ecc_1bit_err" }, { .int_msk = BIT(9), .msg = "cmdq_nic_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "cmdq_nic_tx_head_ecc_1bit_err" }, { .int_msk = BIT(11), .msg = "cmdq_nic_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "cmdq_nic_rx_addr_ecc_1bit_err" }, { .int_msk = BIT(13), .msg = "cmdq_nic_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "cmdq_nic_tx_addr_ecc_1bit_err" }, { .int_msk = BIT(15), .msg = "cmdq_nic_tx_addr_ecc_mbit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_cmdq_rocee_mem_ecc_int[] = { - { .int_msk = BIT(0), .msg = "cmdq_rocee_rx_depth_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "cmdq_rocee_tx_depth_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "cmdq_rocee_rx_tail_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "cmdq_rocee_tx_tail_ecc_1bit_err" }, - { .int_msk = BIT(7), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "cmdq_rocee_rx_head_ecc_1bit_err" }, - { .int_msk = BIT(9), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "cmdq_rocee_tx_head_ecc_1bit_err" }, - { .int_msk = BIT(11), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "cmdq_rocee_rx_addr_ecc_1bit_err" }, - { .int_msk = BIT(13), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "cmdq_rocee_tx_addr_ecc_1bit_err" }, - { .int_msk = BIT(15), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "cmdq_rocee_rx_depth_ecc_mbit_err" }, + { .int_msk = BIT(19), .msg = "cmdq_rocee_tx_depth_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "cmdq_rocee_rx_tail_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "cmdq_rocee_tx_tail_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "cmdq_rocee_rx_head_ecc_mbit_err" }, + { .int_msk = BIT(27), .msg = "cmdq_rocee_tx_head_ecc_mbit_err" }, + { .int_msk = BIT(29), .msg = "cmdq_rocee_rx_addr_ecc_mbit_err" }, + { .int_msk = BIT(31), .msg = "cmdq_rocee_tx_addr_ecc_mbit_err" }, { /* sentinel */ } }; static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { - { .int_msk = BIT(0), .msg = "tqp_int_cfg_even_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "tqp_int_cfg_odd_ecc_1bit_err" }, - { .int_msk = BIT(2), .msg = "tqp_int_ctrl_even_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "tqp_int_ctrl_odd_ecc_1bit_err" }, - { .int_msk = BIT(4), .msg = "tx_que_scan_int_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "rx_que_scan_int_ecc_1bit_err" }, { .int_msk = BIT(6), .msg = "tqp_int_cfg_even_ecc_mbit_err" }, { .int_msk = BIT(7), .msg = "tqp_int_cfg_odd_ecc_mbit_err" }, { .int_msk = BIT(8), .msg = "tqp_int_ctrl_even_ecc_mbit_err" }, @@ -85,15 +46,19 @@ static const struct hclge_hw_error hclge_tqp_int_ecc_int[] = { { /* sentinel */ } }; -static const struct hclge_hw_error 
hclge_igu_com_err_int[] = { +static const struct hclge_hw_error hclge_msix_sram_ecc_int[] = { + { .int_msk = BIT(1), .msg = "msix_nic_ecc_mbit_err" }, + { .int_msk = BIT(3), .msg = "msix_rocee_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_igu_int[] = { { .int_msk = BIT(0), .msg = "igu_rx_buf0_ecc_mbit_err" }, - { .int_msk = BIT(1), .msg = "igu_rx_buf0_ecc_1bit_err" }, { .int_msk = BIT(2), .msg = "igu_rx_buf1_ecc_mbit_err" }, - { .int_msk = BIT(3), .msg = "igu_rx_buf1_ecc_1bit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = { +static const struct hclge_hw_error hclge_igu_egu_tnl_int[] = { { .int_msk = BIT(0), .msg = "rx_buf_overflow" }, { .int_msk = BIT(1), .msg = "rx_stp_fifo_overflow" }, { .int_msk = BIT(2), .msg = "rx_stp_fifo_undeflow" }, @@ -104,51 +69,11 @@ static const struct hclge_hw_error hclge_igu_egu_tnl_err_int[] = { }; static const struct hclge_hw_error hclge_ncsi_err_int[] = { - { .int_msk = BIT(0), .msg = "ncsi_tx_ecc_1bit_err" }, { .int_msk = BIT(1), .msg = "ncsi_tx_ecc_mbit_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_mpf_int0[] = { - { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_1bit_err" }, - { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "umv_key_mem1_ecc_1bit_err" }, - { .int_msk = BIT(4), .msg = "umv_key_mem2_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "umv_key_mem3_ecc_1bit_err" }, - { .int_msk = BIT(6), .msg = "umv_ad_mem_ecc_1bit_err" }, - { .int_msk = BIT(7), .msg = "rss_tc_mode_mem_ecc_1bit_err" }, - { .int_msk = BIT(8), .msg = "rss_idt_mem0_ecc_1bit_err" }, - { .int_msk = BIT(9), .msg = "rss_idt_mem1_ecc_1bit_err" }, - { .int_msk = BIT(10), .msg = "rss_idt_mem2_ecc_1bit_err" }, - { .int_msk = BIT(11), .msg = "rss_idt_mem3_ecc_1bit_err" }, - { .int_msk = BIT(12), .msg = "rss_idt_mem4_ecc_1bit_err" }, - { .int_msk = BIT(13), .msg = "rss_idt_mem5_ecc_1bit_err" }, - { .int_msk = BIT(14), .msg = "rss_idt_mem6_ecc_1bit_err" }, - { .int_msk = BIT(15), .msg = "rss_idt_mem7_ecc_1bit_err" }, - { .int_msk = BIT(16), .msg = "rss_idt_mem8_ecc_1bit_err" }, - { .int_msk = BIT(17), .msg = "rss_idt_mem9_ecc_1bit_err" }, - { .int_msk = BIT(18), .msg = "rss_idt_mem10_ecc_1bit_err" }, - { .int_msk = BIT(19), .msg = "rss_idt_mem11_ecc_1bit_err" }, - { .int_msk = BIT(20), .msg = "rss_idt_mem12_ecc_1bit_err" }, - { .int_msk = BIT(21), .msg = "rss_idt_mem13_ecc_1bit_err" }, - { .int_msk = BIT(22), .msg = "rss_idt_mem14_ecc_1bit_err" }, - { .int_msk = BIT(23), .msg = "rss_idt_mem15_ecc_1bit_err" }, - { .int_msk = BIT(24), .msg = "port_vlan_mem_ecc_1bit_err" }, - { .int_msk = BIT(25), .msg = "mcast_linear_table_mem_ecc_1bit_err" }, - { .int_msk = BIT(26), .msg = "mcast_result_mem_ecc_1bit_err" }, - { .int_msk = BIT(27), - .msg = "flow_director_ad_mem0_ecc_1bit_err" }, - { .int_msk = BIT(28), - .msg = "flow_director_ad_mem1_ecc_1bit_err" }, - { .int_msk = BIT(29), - .msg = "rx_vlan_tag_memory_ecc_1bit_err" }, - { .int_msk = BIT(30), - .msg = "Tx_UP_mapping_config_mem_ecc_1bit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_ppp_mpf_int1[] = { +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st1[] = { { .int_msk = BIT(0), .msg = "vf_vlan_ad_mem_ecc_mbit_err" }, { .int_msk = BIT(1), .msg = "umv_mcast_group_mem_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "umv_key_mem0_ecc_mbit_err" }, @@ -187,23 +112,13 @@ static const 
struct hclge_hw_error hclge_ppp_mpf_int1[] = { { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_pf_int[] = { - { .int_msk = BIT(0), .msg = "Tx_vlan_tag_err" }, +static const struct hclge_hw_error hclge_ppp_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "tx_vlan_tag_err" }, { .int_msk = BIT(1), .msg = "rss_list_tc_unassigned_queue_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_ppp_mpf_int2[] = { - { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_1bit_err" }, - { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_1bit_err" }, - { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_1bit_err" }, - { .int_msk = BIT(3), .msg = "FD_CN0_memory_ecc_1bit_err" }, - { .int_msk = BIT(4), .msg = "FD_CN1_memory_ecc_1bit_err" }, - { .int_msk = BIT(5), .msg = "GRO_AD_memory_ecc_1bit_err" }, - { /* sentinel */ } -}; - -static const struct hclge_hw_error hclge_ppp_mpf_int3[] = { +static const struct hclge_hw_error hclge_ppp_mpf_abnormal_int_st3[] = { { .int_msk = BIT(0), .msg = "hfs_fifo_mem_ecc_mbit_err" }, { .int_msk = BIT(1), .msg = "rslt_descr_fifo_mem_ecc_mbit_err" }, { .int_msk = BIT(2), .msg = "tx_vlan_tag_mem_ecc_mbit_err" }, @@ -213,145 +128,248 @@ static const struct hclge_hw_error hclge_ppp_mpf_int3[] = { { /* sentinel */ } }; -struct hclge_tm_sch_ecc_info { - const char *name; -}; - -static const struct hclge_tm_sch_ecc_info hclge_tm_sch_ecc_err[7][15] = { - { - { .name = "QSET_QUEUE_CTRL:PRI_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:SPA_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:SPB_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRA_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRB_LEN TAB" }, - { .name = "QSET_QUEUE_CTRL:SPA_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:SPB_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRA_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRB_HPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:QS_LINKLIST TAB" }, - { .name = "QSET_QUEUE_CTRL:SPA_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:SPB_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRA_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:WRRB_TPTR TAB" }, - { .name = "QSET_QUEUE_CTRL:QS_DEFICITCNT TAB" }, - }, - { - { .name = "ROCE_QUEUE_CTRL:QS_LEN TAB" }, - { .name = "ROCE_QUEUE_CTRL:QS_TPTR TAB" }, - { .name = "ROCE_QUEUE_CTRL:QS_HPTR TAB" }, - { .name = "ROCE_QUEUE_CTRL:QLINKLIST TAB" }, - { .name = "ROCE_QUEUE_CTRL:QCLEN TAB" }, - }, - { - { .name = "NIC_QUEUE_CTRL:QS_LEN TAB" }, - { .name = "NIC_QUEUE_CTRL:QS_TPTR TAB" }, - { .name = "NIC_QUEUE_CTRL:QS_HPTR TAB" }, - { .name = "NIC_QUEUE_CTRL:QLINKLIST TAB" }, - { .name = "NIC_QUEUE_CTRL:QCLEN TAB" }, - }, - { - { .name = "RAM_CFG_CTRL:CSHAP TAB" }, - { .name = "RAM_CFG_CTRL:PSHAP TAB" }, - }, - { - { .name = "SHAPER_CTRL:PSHAP TAB" }, - }, - { - { .name = "MSCH_CTRL" }, - }, - { - { .name = "TOP_CTRL" }, - }, -}; - -static const struct hclge_hw_error hclge_tm_sch_err_int[] = { - { .int_msk = BIT(0), .msg = "tm_sch_ecc_1bit_err" }, +static const struct hclge_hw_error hclge_tm_sch_rint[] = { { .int_msk = BIT(1), .msg = "tm_sch_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_full_err" }, - { .int_msk = 
BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_empty_err" }, - { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_full_err" }, - { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_empty_err" }, + { .int_msk = BIT(2), .msg = "tm_sch_port_shap_sub_fifo_wr_err" }, + { .int_msk = BIT(3), .msg = "tm_sch_port_shap_sub_fifo_rd_err" }, + { .int_msk = BIT(4), .msg = "tm_sch_pg_pshap_sub_fifo_wr_err" }, + { .int_msk = BIT(5), .msg = "tm_sch_pg_pshap_sub_fifo_rd_err" }, + { .int_msk = BIT(6), .msg = "tm_sch_pg_cshap_sub_fifo_wr_err" }, + { .int_msk = BIT(7), .msg = "tm_sch_pg_cshap_sub_fifo_rd_err" }, + { .int_msk = BIT(8), .msg = "tm_sch_pri_pshap_sub_fifo_wr_err" }, + { .int_msk = BIT(9), .msg = "tm_sch_pri_pshap_sub_fifo_rd_err" }, + { .int_msk = BIT(10), .msg = "tm_sch_pri_cshap_sub_fifo_wr_err" }, + { .int_msk = BIT(11), .msg = "tm_sch_pri_cshap_sub_fifo_rd_err" }, { .int_msk = BIT(12), - .msg = "tm_sch_port_shap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_port_shap_offset_fifo_wr_err" }, { .int_msk = BIT(13), - .msg = "tm_sch_port_shap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_port_shap_offset_fifo_rd_err" }, { .int_msk = BIT(14), - .msg = "tm_sch_pg_pshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pg_pshap_offset_fifo_wr_err" }, { .int_msk = BIT(15), - .msg = "tm_sch_pg_pshap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_pg_pshap_offset_fifo_rd_err" }, { .int_msk = BIT(16), - .msg = "tm_sch_pg_cshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pg_cshap_offset_fifo_wr_err" }, { .int_msk = BIT(17), - .msg = "tm_sch_pg_cshap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_pg_cshap_offset_fifo_rd_err" }, { .int_msk = BIT(18), - .msg = "tm_sch_pri_pshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pri_pshap_offset_fifo_wr_err" }, { .int_msk = BIT(19), - .msg = "tm_sch_pri_pshap_offset_fifo_rd_empty_err" }, + .msg = "tm_sch_pri_pshap_offset_fifo_rd_err" }, { .int_msk = BIT(20), - .msg = "tm_sch_pri_cshap_offset_fifo_wr_full_err" }, + .msg = "tm_sch_pri_cshap_offset_fifo_wr_err" }, { .int_msk = BIT(21), - .msg = "tm_sch_pri_cshap_offset_fifo_rd_empty_err" }, - { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_full_err" }, - { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_empty_err" }, - { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_full_err" }, - { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_empty_err" }, - { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_full_err" }, - { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_empty_err" }, - { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_full_err" }, - { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_empty_err" }, - { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_full_err" }, - { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_empty_err" }, + .msg = "tm_sch_pri_cshap_offset_fifo_rd_err" }, + { .int_msk = BIT(22), .msg = "tm_sch_rq_fifo_wr_err" }, + { .int_msk = BIT(23), .msg = "tm_sch_rq_fifo_rd_err" }, + { .int_msk = BIT(24), .msg = "tm_sch_nq_fifo_wr_err" }, + { .int_msk = BIT(25), .msg = "tm_sch_nq_fifo_rd_err" }, + { .int_msk = BIT(26), .msg = "tm_sch_roce_up_fifo_wr_err" }, + { .int_msk = BIT(27), .msg = "tm_sch_roce_up_fifo_rd_err" }, + { .int_msk = BIT(28), .msg = "tm_sch_rcb_byte_fifo_wr_err" }, + { .int_msk = BIT(29), .msg = "tm_sch_rcb_byte_fifo_rd_err" }, + { .int_msk = BIT(30), .msg = "tm_sch_ssu_byte_fifo_wr_err" }, + { .int_msk = BIT(31), .msg = "tm_sch_ssu_byte_fifo_rd_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_qcn_fifo_rint[] = { + { .int_msk = BIT(0), .msg = 
"qcn_shap_gp0_sch_fifo_rd_err" }, + { .int_msk = BIT(1), .msg = "qcn_shap_gp0_sch_fifo_wr_err" }, + { .int_msk = BIT(2), .msg = "qcn_shap_gp1_sch_fifo_rd_err" }, + { .int_msk = BIT(3), .msg = "qcn_shap_gp1_sch_fifo_wr_err" }, + { .int_msk = BIT(4), .msg = "qcn_shap_gp2_sch_fifo_rd_err" }, + { .int_msk = BIT(5), .msg = "qcn_shap_gp2_sch_fifo_wr_err" }, + { .int_msk = BIT(6), .msg = "qcn_shap_gp3_sch_fifo_rd_err" }, + { .int_msk = BIT(7), .msg = "qcn_shap_gp3_sch_fifo_wr_err" }, + { .int_msk = BIT(8), .msg = "qcn_shap_gp0_offset_fifo_rd_err" }, + { .int_msk = BIT(9), .msg = "qcn_shap_gp0_offset_fifo_wr_err" }, + { .int_msk = BIT(10), .msg = "qcn_shap_gp1_offset_fifo_rd_err" }, + { .int_msk = BIT(11), .msg = "qcn_shap_gp1_offset_fifo_wr_err" }, + { .int_msk = BIT(12), .msg = "qcn_shap_gp2_offset_fifo_rd_err" }, + { .int_msk = BIT(13), .msg = "qcn_shap_gp2_offset_fifo_wr_err" }, + { .int_msk = BIT(14), .msg = "qcn_shap_gp3_offset_fifo_rd_err" }, + { .int_msk = BIT(15), .msg = "qcn_shap_gp3_offset_fifo_wr_err" }, + { .int_msk = BIT(16), .msg = "qcn_byte_info_fifo_rd_err" }, + { .int_msk = BIT(17), .msg = "qcn_byte_info_fifo_wr_err" }, { /* sentinel */ } }; -static const struct hclge_hw_error hclge_qcn_ecc_err_int[] = { - { .int_msk = BIT(0), .msg = "qcn_byte_mem_ecc_1bit_err" }, +static const struct hclge_hw_error hclge_qcn_ecc_rint[] = { { .int_msk = BIT(1), .msg = "qcn_byte_mem_ecc_mbit_err" }, - { .int_msk = BIT(2), .msg = "qcn_time_mem_ecc_1bit_err" }, { .int_msk = BIT(3), .msg = "qcn_time_mem_ecc_mbit_err" }, - { .int_msk = BIT(4), .msg = "qcn_fb_mem_ecc_1bit_err" }, { .int_msk = BIT(5), .msg = "qcn_fb_mem_ecc_mbit_err" }, - { .int_msk = BIT(6), .msg = "qcn_link_mem_ecc_1bit_err" }, { .int_msk = BIT(7), .msg = "qcn_link_mem_ecc_mbit_err" }, - { .int_msk = BIT(8), .msg = "qcn_rate_mem_ecc_1bit_err" }, { .int_msk = BIT(9), .msg = "qcn_rate_mem_ecc_mbit_err" }, - { .int_msk = BIT(10), .msg = "qcn_tmplt_mem_ecc_1bit_err" }, { .int_msk = BIT(11), .msg = "qcn_tmplt_mem_ecc_mbit_err" }, - { .int_msk = BIT(12), .msg = "qcn_shap_cfg_mem_ecc_1bit_err" }, { .int_msk = BIT(13), .msg = "qcn_shap_cfg_mem_ecc_mbit_err" }, - { .int_msk = BIT(14), .msg = "qcn_gp0_barrel_mem_ecc_1bit_err" }, { .int_msk = BIT(15), .msg = "qcn_gp0_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(16), .msg = "qcn_gp1_barrel_mem_ecc_1bit_err" }, { .int_msk = BIT(17), .msg = "qcn_gp1_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(18), .msg = "qcn_gp2_barrel_mem_ecc_1bit_err" }, { .int_msk = BIT(19), .msg = "qcn_gp2_barrel_mem_ecc_mbit_err" }, - { .int_msk = BIT(20), .msg = "qcn_gp3_barral_mem_ecc_1bit_err" }, { .int_msk = BIT(21), .msg = "qcn_gp3_barral_mem_ecc_mbit_err" }, { /* sentinel */ } }; -static void hclge_log_error(struct device *dev, - const struct hclge_hw_error *err_list, +static const struct hclge_hw_error hclge_mac_afifo_tnl_int[] = { + { .int_msk = BIT(0), .msg = "egu_cge_afifo_ecc_1bit_err" }, + { .int_msk = BIT(1), .msg = "egu_cge_afifo_ecc_mbit_err" }, + { .int_msk = BIT(2), .msg = "egu_lge_afifo_ecc_1bit_err" }, + { .int_msk = BIT(3), .msg = "egu_lge_afifo_ecc_mbit_err" }, + { .int_msk = BIT(4), .msg = "cge_igu_afifo_ecc_1bit_err" }, + { .int_msk = BIT(5), .msg = "cge_igu_afifo_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "lge_igu_afifo_ecc_1bit_err" }, + { .int_msk = BIT(7), .msg = "lge_igu_afifo_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st2[] = { + { .int_msk = BIT(13), .msg = "rpu_rx_pkt_bit32_ecc_mbit_err" }, + { .int_msk = BIT(14), .msg = 
"rpu_rx_pkt_bit33_ecc_mbit_err" }, + { .int_msk = BIT(15), .msg = "rpu_rx_pkt_bit34_ecc_mbit_err" }, + { .int_msk = BIT(16), .msg = "rpu_rx_pkt_bit35_ecc_mbit_err" }, + { .int_msk = BIT(17), .msg = "rcb_tx_ring_ecc_mbit_err" }, + { .int_msk = BIT(18), .msg = "rcb_rx_ring_ecc_mbit_err" }, + { .int_msk = BIT(19), .msg = "rcb_tx_fbd_ecc_mbit_err" }, + { .int_msk = BIT(20), .msg = "rcb_rx_ebd_ecc_mbit_err" }, + { .int_msk = BIT(21), .msg = "rcb_tso_info_ecc_mbit_err" }, + { .int_msk = BIT(22), .msg = "rcb_tx_int_info_ecc_mbit_err" }, + { .int_msk = BIT(23), .msg = "rcb_rx_int_info_ecc_mbit_err" }, + { .int_msk = BIT(24), .msg = "tpu_tx_pkt_0_ecc_mbit_err" }, + { .int_msk = BIT(25), .msg = "tpu_tx_pkt_1_ecc_mbit_err" }, + { .int_msk = BIT(26), .msg = "rd_bus_err" }, + { .int_msk = BIT(27), .msg = "wr_bus_err" }, + { .int_msk = BIT(28), .msg = "reg_search_miss" }, + { .int_msk = BIT(29), .msg = "rx_q_search_miss" }, + { .int_msk = BIT(30), .msg = "ooo_ecc_err_detect" }, + { .int_msk = BIT(31), .msg = "ooo_ecc_err_multpl" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppu_mpf_abnormal_int_st3[] = { + { .int_msk = BIT(4), .msg = "gro_bd_ecc_mbit_err" }, + { .int_msk = BIT(5), .msg = "gro_context_ecc_mbit_err" }, + { .int_msk = BIT(6), .msg = "rx_stash_cfg_ecc_mbit_err" }, + { .int_msk = BIT(7), .msg = "axi_rd_fbd_ecc_mbit_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ppu_pf_abnormal_int[] = { + { .int_msk = BIT(0), .msg = "over_8bd_no_fe" }, + { .int_msk = BIT(1), .msg = "tso_mss_cmp_min_err" }, + { .int_msk = BIT(2), .msg = "tso_mss_cmp_max_err" }, + { .int_msk = BIT(3), .msg = "tx_rd_fbd_poison" }, + { .int_msk = BIT(4), .msg = "rx_rd_ebd_poison" }, + { .int_msk = BIT(5), .msg = "buf_wait_timeout" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_com_err_int[] = { + { .int_msk = BIT(0), .msg = "buf_sum_err" }, + { .int_msk = BIT(1), .msg = "ppp_mb_num_err" }, + { .int_msk = BIT(2), .msg = "ppp_mbid_err" }, + { .int_msk = BIT(3), .msg = "ppp_rlt_mac_err" }, + { .int_msk = BIT(4), .msg = "ppp_rlt_host_err" }, + { .int_msk = BIT(5), .msg = "cks_edit_position_err" }, + { .int_msk = BIT(6), .msg = "cks_edit_condition_err" }, + { .int_msk = BIT(7), .msg = "vlan_edit_condition_err" }, + { .int_msk = BIT(8), .msg = "vlan_num_ot_err" }, + { .int_msk = BIT(9), .msg = "vlan_num_in_err" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_err_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, + { .int_msk = BIT(1), .msg = "tpu_pkt_without_key_port" }, + { .int_msk = BIT(2), .msg = "igu_pkt_without_key_port" }, + { .int_msk = BIT(3), .msg = "roc_eof_mis_match_port" }, + { .int_msk = BIT(4), .msg = "tpu_eof_mis_match_port" }, + { .int_msk = BIT(5), .msg = "igu_eof_mis_match_port" }, + { .int_msk = BIT(6), .msg = "roc_sof_mis_match_port" }, + { .int_msk = BIT(7), .msg = "tpu_sof_mis_match_port" }, + { .int_msk = BIT(8), .msg = "igu_sof_mis_match_port" }, + { .int_msk = BIT(11), .msg = "ets_rd_int_rx_port" }, + { .int_msk = BIT(12), .msg = "ets_wr_int_rx_port" }, + { .int_msk = BIT(13), .msg = "ets_rd_int_tx_port" }, + { .int_msk = BIT(14), .msg = "ets_wr_int_tx_port" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_fifo_overflow_int[] = { + { .int_msk = BIT(0), .msg = "ig_mac_inf_int" }, + { .int_msk = BIT(1), .msg = "ig_host_inf_int" }, + { .int_msk = BIT(2), .msg = "ig_roc_buf_int" }, + { .int_msk = BIT(3), .msg = "ig_host_data_fifo_int" }, + { 
.int_msk = BIT(4), .msg = "ig_host_key_fifo_int" }, + { .int_msk = BIT(5), .msg = "tx_qcn_fifo_int" }, + { .int_msk = BIT(6), .msg = "rx_qcn_fifo_int" }, + { .int_msk = BIT(7), .msg = "tx_pf_rd_fifo_int" }, + { .int_msk = BIT(8), .msg = "rx_pf_rd_fifo_int" }, + { .int_msk = BIT(9), .msg = "qm_eof_fifo_int" }, + { .int_msk = BIT(10), .msg = "mb_rlt_fifo_int" }, + { .int_msk = BIT(11), .msg = "dup_uncopy_fifo_int" }, + { .int_msk = BIT(12), .msg = "dup_cnt_rd_fifo_int" }, + { .int_msk = BIT(13), .msg = "dup_cnt_drop_fifo_int" }, + { .int_msk = BIT(14), .msg = "dup_cnt_wrb_fifo_int" }, + { .int_msk = BIT(15), .msg = "host_cmd_fifo_int" }, + { .int_msk = BIT(16), .msg = "mac_cmd_fifo_int" }, + { .int_msk = BIT(17), .msg = "host_cmd_bitmap_empty_int" }, + { .int_msk = BIT(18), .msg = "mac_cmd_bitmap_empty_int" }, + { .int_msk = BIT(19), .msg = "dup_bitmap_empty_int" }, + { .int_msk = BIT(20), .msg = "out_queue_bitmap_empty_int" }, + { .int_msk = BIT(21), .msg = "bank2_bitmap_empty_int" }, + { .int_msk = BIT(22), .msg = "bank1_bitmap_empty_int" }, + { .int_msk = BIT(23), .msg = "bank0_bitmap_empty_int" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_ets_tcg_int[] = { + { .int_msk = BIT(0), .msg = "ets_rd_int_rx_tcg" }, + { .int_msk = BIT(1), .msg = "ets_wr_int_rx_tcg" }, + { .int_msk = BIT(2), .msg = "ets_rd_int_tx_tcg" }, + { .int_msk = BIT(3), .msg = "ets_wr_int_tx_tcg" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_ssu_port_based_pf_int[] = { + { .int_msk = BIT(0), .msg = "roc_pkt_without_key_port" }, + { .int_msk = BIT(9), .msg = "low_water_line_err_port" }, + { .int_msk = BIT(10), .msg = "hi_water_line_err_port" }, + { /* sentinel */ } +}; + +static const struct hclge_hw_error hclge_rocee_qmm_ovf_err_int[] = { + { .int_msk = 0, .msg = "rocee qmm ovf: sgid invalid err" }, + { .int_msk = 0x4, .msg = "rocee qmm ovf: sgid ovf err" }, + { .int_msk = 0x8, .msg = "rocee qmm ovf: smac invalid err" }, + { .int_msk = 0xC, .msg = "rocee qmm ovf: smac ovf err" }, + { .int_msk = 0x10, .msg = "rocee qmm ovf: cqc invalid err" }, + { .int_msk = 0x11, .msg = "rocee qmm ovf: cqc ovf err" }, + { .int_msk = 0x12, .msg = "rocee qmm ovf: cqc hopnum err" }, + { .int_msk = 0x13, .msg = "rocee qmm ovf: cqc ba0 err" }, + { .int_msk = 0x14, .msg = "rocee qmm ovf: srqc invalid err" }, + { .int_msk = 0x15, .msg = "rocee qmm ovf: srqc ovf err" }, + { .int_msk = 0x16, .msg = "rocee qmm ovf: srqc hopnum err" }, + { .int_msk = 0x17, .msg = "rocee qmm ovf: srqc ba0 err" }, + { .int_msk = 0x18, .msg = "rocee qmm ovf: mpt invalid err" }, + { .int_msk = 0x19, .msg = "rocee qmm ovf: mpt ovf err" }, + { .int_msk = 0x1A, .msg = "rocee qmm ovf: mpt hopnum err" }, + { .int_msk = 0x1B, .msg = "rocee qmm ovf: mpt ba0 err" }, + { .int_msk = 0x1C, .msg = "rocee qmm ovf: qpc invalid err" }, + { .int_msk = 0x1D, .msg = "rocee qmm ovf: qpc ovf err" }, + { .int_msk = 0x1E, .msg = "rocee qmm ovf: qpc hopnum err" }, + { .int_msk = 0x1F, .msg = "rocee qmm ovf: qpc ba0 err" }, + { /* sentinel */ } +}; + +static void hclge_log_error(struct device *dev, char *reg, + const struct hclge_hw_error *err, u32 err_sts) { - const struct hclge_hw_error *err; - int i = 0; - - while (err_list[i].msg) { - err = &err_list[i]; - if (!(err->int_msk & err_sts)) { - i++; - continue; - } - dev_warn(dev, "%s [error status=0x%x] found\n", - err->msg, err_sts); - i++; + while (err->msg) { + if (err->int_msk & err_sts) + dev_warn(dev, "%s %s found [error status=0x%x]\n", + reg, err->msg, err_sts); + err++; } } @@ 
-391,96 +409,44 @@ static int hclge_cmd_query_error(struct hclge_dev *hdev, return ret; } -/* hclge_cmd_clear_error: clear the error status - * @hdev: pointer to struct hclge_dev - * @desc: descriptor for describing the command - * @desc_src: prefilled descriptor from the previous command for reusing - * @cmd: command opcode - * @flag: flag for extended command structure - * - * This function clear the error status in the hw register/s using command - */ -static int hclge_cmd_clear_error(struct hclge_dev *hdev, - struct hclge_desc *desc, - struct hclge_desc *desc_src, - u32 cmd, u16 flag) -{ - struct device *dev = &hdev->pdev->dev; - int num = 1; - int ret, i; - - if (cmd) { - hclge_cmd_setup_basic_desc(&desc[0], cmd, false); - if (flag) { - desc[0].flag |= cpu_to_le16(flag); - hclge_cmd_setup_basic_desc(&desc[1], cmd, false); - num = 2; - } - if (desc_src) { - for (i = 0; i < 6; i++) { - desc[0].data[i] = desc_src[0].data[i]; - if (flag) - desc[1].data[i] = desc_src[1].data[i]; - } - } - } else { - hclge_cmd_reuse_desc(&desc[0], false); - if (flag) { - desc[0].flag |= cpu_to_le16(flag); - hclge_cmd_reuse_desc(&desc[1], false); - num = 2; - } - } - ret = hclge_cmd_send(&hdev->hw, &desc[0], num); - if (ret) - dev_err(dev, "clear error cmd failed (%d)\n", ret); - - return ret; -} - -static int hclge_enable_common_error(struct hclge_dev *hdev, bool en) +static int hclge_config_common_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; int ret; + /* configure common error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], HCLGE_COMMON_ECC_INT_CFG, false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], HCLGE_COMMON_ECC_INT_CFG, false); if (en) { - /* enable COMMON error interrupts */ desc[0].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN); desc[0].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN | HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN); desc[0].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN); - desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN); desc[0].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN); - } else { - /* disable COMMON error interrupts */ - desc[0].data[0] = 0; - desc[0].data[2] = 0; - desc[0].data[3] = 0; - desc[0].data[4] = 0; - desc[0].data[5] = 0; } + desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK); desc[1].data[2] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_ERR_INT_EN_MASK | HCLGE_CMDQ_ROCEE_ECC_ERR_INT_EN_MASK); desc[1].data[3] = cpu_to_le32(HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK); - desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK); + desc[1].data[4] = cpu_to_le32(HCLGE_TQP_ECC_ERR_INT_EN_MASK | + HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK); desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_ERR_INT_EN_MASK); ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); if (ret) dev_err(dev, - "failed(%d) to enable/disable COMMON err interrupts\n", - ret); + "fail(%d) to configure common err interrupts\n", ret); return ret; } -static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en) +static int hclge_config_ncsi_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc; @@ -489,74 +455,65 @@ static int hclge_enable_ncsi_error(struct hclge_dev *hdev, bool en) if (hdev->pdev->revision < 0x21) return 0; - /* enable/disable NCSI error interrupts */ + /* configure NCSI error interrupts */ hclge_cmd_setup_basic_desc(&desc, 
HCLGE_NCSI_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_NCSI_ERR_INT_EN); - else - desc.data[0] = 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(dev, - "failed(%d) to enable/disable NCSI error interrupts\n", - ret); + "fail(%d) to configure NCSI error interrupts\n", ret); return ret; } -static int hclge_enable_igu_egu_error(struct hclge_dev *hdev, bool en) +static int hclge_config_igu_egu_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc; int ret; - /* enable/disable error interrupts */ + /* configure IGU,EGU error interrupts */ hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_COMMON_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN); - else - desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_ERR_INT_EN_MASK); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(dev, - "failed(%d) to enable/disable IGU common interrupts\n", - ret); + "fail(%d) to configure IGU common interrupts\n", ret); return ret; } hclge_cmd_setup_basic_desc(&desc, HCLGE_IGU_EGU_TNL_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN); - else - desc.data[0] = 0; + desc.data[1] = cpu_to_le32(HCLGE_IGU_TNL_ERR_INT_EN_MASK); ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { dev_err(dev, - "failed(%d) to enable/disable IGU-EGU TNL interrupts\n", - ret); + "fail(%d) to configure IGU-EGU TNL interrupts\n", ret); return ret; } - ret = hclge_enable_ncsi_error(hdev, en); - if (ret) - dev_err(dev, "fail(%d) to en/disable err int\n", ret); + ret = hclge_config_ncsi_hw_err_int(hdev, en); return ret; } -static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, +static int hclge_config_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc[2]; int ret; - /* enable/disable PPP error interrupts */ + /* configure PPP error interrupts */ hclge_cmd_setup_basic_desc(&desc[0], cmd, false); desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); hclge_cmd_setup_basic_desc(&desc[1], cmd, false); @@ -567,24 +524,24 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN); desc[0].data[1] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN); - } else { - desc[0].data[0] = 0; - desc[0].data[1] = 0; + desc[0].data[4] = cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN); } + desc[1].data[0] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT0_EN_MASK); desc[1].data[1] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT1_EN_MASK); + if (hdev->pdev->revision >= 0x21) + desc[1].data[2] = + cpu_to_le32(HCLGE_PPP_PF_ERR_INT_EN_MASK); } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { if (en) { desc[0].data[0] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN); desc[0].data[1] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT3_EN); - } else { - desc[0].data[0] = 0; - desc[0].data[1] = 0; } + desc[1].data[0] = cpu_to_le32(HCLGE_PPP_MPF_ECC_ERR_INT2_EN_MASK); desc[1].data[1] = @@ -593,498 +550,863 @@ static int hclge_enable_ppp_error_interrupt(struct hclge_dev *hdev, u32 cmd, ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); if (ret) - dev_err(dev, - "failed(%d) to enable/disable PPP error interrupts\n", - ret); + dev_err(dev, "fail(%d) to configure PPP error intr\n", ret); return ret; } -static int hclge_enable_ppp_error(struct hclge_dev *hdev, bool en) +static int hclge_config_ppp_hw_err_int(struct hclge_dev *hdev, bool en) { - struct device *dev = &hdev->pdev->dev; int ret; - ret = hclge_enable_ppp_error_interrupt(hdev, 
HCLGE_PPP_CMD0_INT_CMD, + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD0_INT_CMD, en); - if (ret) { - dev_err(dev, - "failed(%d) to enable/disable PPP error intr 0,1\n", - ret); + if (ret) return ret; - } - ret = hclge_enable_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, + ret = hclge_config_ppp_error_interrupt(hdev, HCLGE_PPP_CMD1_INT_CMD, en); - if (ret) - dev_err(dev, - "failed(%d) to enable/disable PPP error intr 2,3\n", - ret); return ret; } -int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en) +static int hclge_config_tm_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc; int ret; - /* enable TM SCH hw errors */ + /* configure TM SCH hw errors */ hclge_cmd_setup_basic_desc(&desc, HCLGE_TM_SCH_ECC_INT_EN, false); if (en) desc.data[0] = cpu_to_le32(HCLGE_TM_SCH_ECC_ERR_INT_EN); - else - desc.data[0] = 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) { - dev_err(dev, "failed(%d) to configure TM SCH errors\n", ret); + dev_err(dev, "fail(%d) to configure TM SCH errors\n", ret); return ret; } - /* enable TM QCN hw errors */ + /* configure TM QCN hw errors */ ret = hclge_cmd_query_error(hdev, &desc, HCLGE_TM_QCN_MEM_INT_CFG, 0, 0, 0); if (ret) { - dev_err(dev, "failed(%d) to read TM QCN CFG status\n", ret); + dev_err(dev, "fail(%d) to read TM QCN CFG status\n", ret); return ret; } hclge_cmd_reuse_desc(&desc, false); if (en) desc.data[1] = cpu_to_le32(HCLGE_TM_QCN_MEM_ERR_INT_EN); - else - desc.data[1] = 0; ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) dev_err(dev, - "failed(%d) to configure TM QCN mem errors\n", ret); + "fail(%d) to configure TM QCN mem errors\n", ret); return ret; } -static void hclge_process_common_error(struct hclge_dev *hdev, - enum hclge_err_int_type type) +static int hclge_config_mac_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; - struct hclge_desc desc[2]; - u32 err_sts; + struct hclge_desc desc; int ret; - /* read err sts */ - ret = hclge_cmd_query_error(hdev, &desc[0], - HCLGE_COMMON_ECC_INT_CFG, - HCLGE_CMD_FLAG_NEXT, 0, 0); - if (ret) { + /* configure MAC common error interrupts */ + hclge_cmd_setup_basic_desc(&desc, HCLGE_MAC_COMMON_INT_EN, false); + if (en) + desc.data[0] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN); + + desc.data[1] = cpu_to_le32(HCLGE_MAC_COMMON_ERR_INT_EN_MASK); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) dev_err(dev, - "failed(=%d) to query COMMON error interrupt status\n", - ret); - return; - } + "fail(%d) to configure MAC COMMON error intr\n", ret); - /* log err */ - err_sts = (le32_to_cpu(desc[0].data[0])) & HCLGE_IMP_TCM_ECC_INT_MASK; - hclge_log_error(dev, &hclge_imp_tcm_ecc_int[0], err_sts); + return ret; +} - err_sts = (le32_to_cpu(desc[0].data[1])) & HCLGE_CMDQ_ECC_INT_MASK; - hclge_log_error(dev, &hclge_cmdq_nic_mem_ecc_int[0], err_sts); +static int hclge_config_ppu_error_interrupts(struct hclge_dev *hdev, u32 cmd, + bool en) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int num = 1; + int ret; - err_sts = (le32_to_cpu(desc[0].data[1]) >> HCLGE_CMDQ_ROC_ECC_INT_SHIFT) - & HCLGE_CMDQ_ECC_INT_MASK; - hclge_log_error(dev, &hclge_cmdq_rocee_mem_ecc_int[0], err_sts); + /* configure PPU error interrupts */ + if (cmd == HCLGE_PPU_MPF_ECC_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + desc[0].flag |= HCLGE_CMD_FLAG_NEXT; + hclge_cmd_setup_basic_desc(&desc[1], cmd, false); + if (en) { + desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN; + desc[0].data[1] 
= HCLGE_PPU_MPF_ABNORMAL_INT1_EN; + desc[1].data[3] = HCLGE_PPU_MPF_ABNORMAL_INT3_EN; + desc[1].data[4] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN; + } - if ((le32_to_cpu(desc[0].data[3])) & BIT(0)) - dev_warn(dev, "imp_rd_data_poison_err found\n"); + desc[1].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK; + desc[1].data[1] = HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK; + desc[1].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK; + desc[1].data[3] |= HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK; + num = 2; + } else if (cmd == HCLGE_PPU_MPF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2; - err_sts = (le32_to_cpu(desc[0].data[3]) >> HCLGE_TQP_ECC_INT_SHIFT) & - HCLGE_TQP_ECC_INT_MASK; - hclge_log_error(dev, &hclge_tqp_int_ecc_int[0], err_sts); + desc[0].data[2] = HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK; + } else if (cmd == HCLGE_PPU_PF_OTHER_INT_CMD) { + hclge_cmd_setup_basic_desc(&desc[0], cmd, false); + if (en) + desc[0].data[0] = HCLGE_PPU_PF_ABNORMAL_INT_EN; - err_sts = (le32_to_cpu(desc[0].data[5])) & - HCLGE_IMP_ITCM4_ECC_INT_MASK; - hclge_log_error(dev, &hclge_imp_itcm4_ecc_int[0], err_sts); + desc[0].data[2] = HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK; + } else { + dev_err(dev, "Invalid cmd to configure PPU error interrupts\n"); + return -EINVAL; + } - /* clear error interrupts */ - desc[1].data[0] = cpu_to_le32(HCLGE_IMP_TCM_ECC_CLR_MASK); - desc[1].data[1] = cpu_to_le32(HCLGE_CMDQ_NIC_ECC_CLR_MASK | - HCLGE_CMDQ_ROCEE_ECC_CLR_MASK); - desc[1].data[3] = cpu_to_le32(HCLGE_TQP_IMP_ERR_CLR_MASK); - desc[1].data[5] = cpu_to_le32(HCLGE_IMP_ITCM4_ECC_CLR_MASK); + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); - ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, - HCLGE_CMD_FLAG_NEXT); - if (ret) - dev_err(dev, - "failed(%d) to clear COMMON error interrupt status\n", - ret); + return ret; } -static void hclge_process_ncsi_error(struct hclge_dev *hdev, - enum hclge_err_int_type type) +static int hclge_config_ppu_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; - struct hclge_desc desc_rd; - struct hclge_desc desc_wr; - u32 err_sts; int ret; - if (hdev->pdev->revision < 0x21) - return; - - /* read NCSI error status */ - ret = hclge_cmd_query_error(hdev, &desc_rd, HCLGE_NCSI_INT_QUERY, - 0, 1, HCLGE_NCSI_ERR_INT_TYPE); + ret = hclge_config_ppu_error_interrupts(hdev, HCLGE_PPU_MPF_ECC_INT_CMD, + en); if (ret) { - dev_err(dev, - "failed(=%d) to query NCSI error interrupt status\n", + dev_err(dev, "fail(%d) to configure PPU MPF ECC error intr\n", ret); - return; + return ret; } - /* log err */ - err_sts = le32_to_cpu(desc_rd.data[0]); - hclge_log_error(dev, &hclge_ncsi_err_int[0], err_sts); + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_MPF_OTHER_INT_CMD, + en); + if (ret) { + dev_err(dev, "fail(%d) to configure PPU MPF other intr\n", ret); + return ret; + } - /* clear err int */ - ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, - HCLGE_NCSI_INT_CLR, 0); + ret = hclge_config_ppu_error_interrupts(hdev, + HCLGE_PPU_PF_OTHER_INT_CMD, en); if (ret) - dev_err(dev, "failed(=%d) to clear NCSI interrupt status\n", + dev_err(dev, "fail(%d) to configure PPU PF error interrupts\n", ret); + return ret; } -static void hclge_process_igu_egu_error(struct hclge_dev *hdev, - enum hclge_err_int_type int_type) +static int hclge_config_ssu_hw_err_int(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; - struct hclge_desc desc_rd; - struct hclge_desc desc_wr; - u32 err_sts; + struct hclge_desc 
desc[2]; int ret; - /* read IGU common err sts */ - ret = hclge_cmd_query_error(hdev, &desc_rd, - HCLGE_IGU_COMMON_INT_QUERY, - 0, 1, int_type); - if (ret) { - dev_err(dev, "failed(=%d) to query IGU common int status\n", - ret); - return; + /* configure SSU ecc error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_ECC_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_ECC_INT_CMD, false); + if (en) { + desc[0].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN); + desc[0].data[1] = + cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN); + desc[0].data[4] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN); } - /* log err */ - err_sts = le32_to_cpu(desc_rd.data[0]) & - HCLGE_IGU_COM_INT_MASK; - hclge_log_error(dev, &hclge_igu_com_err_int[0], err_sts); + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK); + desc[1].data[2] = cpu_to_le32(HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK); - /* clear err int */ - ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, - HCLGE_IGU_COMMON_INT_CLR, 0); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); if (ret) { - dev_err(dev, "failed(=%d) to clear IGU common int status\n", - ret); - return; + dev_err(dev, + "fail(%d) to configure SSU ECC error interrupt\n", ret); + return ret; } - /* read IGU-EGU TNL err sts */ - ret = hclge_cmd_query_error(hdev, &desc_rd, - HCLGE_IGU_EGU_TNL_INT_QUERY, - 0, 1, int_type); - if (ret) { - dev_err(dev, "failed(=%d) to query IGU-EGU TNL int status\n", - ret); - return; + /* configure SSU common error interrupts */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_SSU_COMMON_INT_CMD, false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + hclge_cmd_setup_basic_desc(&desc[1], HCLGE_SSU_COMMON_INT_CMD, false); + + if (en) { + if (hdev->pdev->revision >= 0x21) + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN); + else + desc[0].data[0] = + cpu_to_le32(HCLGE_SSU_COMMON_INT_EN & ~BIT(5)); + desc[0].data[1] = cpu_to_le32(HCLGE_SSU_PORT_BASED_ERR_INT_EN); + desc[0].data[2] = + cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN); } - /* log err */ - err_sts = le32_to_cpu(desc_rd.data[0]) & - HCLGE_IGU_EGU_TNL_INT_MASK; - hclge_log_error(dev, &hclge_igu_egu_tnl_err_int[0], err_sts); + desc[1].data[0] = cpu_to_le32(HCLGE_SSU_COMMON_INT_EN_MASK | + HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK); + desc[1].data[1] = cpu_to_le32(HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK); - /* clear err int */ - ret = hclge_cmd_clear_error(hdev, &desc_wr, &desc_rd, - HCLGE_IGU_EGU_TNL_INT_CLR, 0); - if (ret) { - dev_err(dev, "failed(=%d) to clear IGU-EGU TNL int status\n", - ret); - return; - } + ret = hclge_cmd_send(&hdev->hw, &desc[0], 2); + if (ret) + dev_err(dev, + "fail(%d) to configure SSU COMMON error intr\n", ret); - hclge_process_ncsi_error(hdev, HCLGE_ERR_INT_RAS_NFE); + return ret; } -static int hclge_log_and_clear_ppp_error(struct hclge_dev *hdev, u32 cmd, - enum hclge_err_int_type int_type) +#define HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type) \ + do { \ + if (ae_dev->ops->set_default_reset_request) \ + ae_dev->ops->set_default_reset_request(ae_dev, \ + reset_type); \ + } while (0) + +/* hclge_handle_mpf_ras_error: handle all main PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the main PF RAS errors in the + * hw register/s using command. 
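+ *
+ * The BD layout parsed below follows the order returned by the
+ * HCLGE_QUERY_CLEAR_MPF_RAS_INT query: desc[0] holds the IMP TCM,
+ * CMDQ, TQP and MSI-X status words; desc[2] SSU; desc[3] IGU;
+ * desc[4] PPP; desc[5] PPU(RCB); desc[6] TM; desc[7] QCN; and
+ * desc[9] NCSI. Each status word is matched against its
+ * sentinel-terminated hclge_hw_error table, e.g.:
+ *   hclge_log_error(dev, "IMP_TCM_ECC_INT_STS",
+ *                   &hclge_imp_tcm_ecc_int[0], status);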
+ */ +static int hclge_handle_mpf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) { - enum hnae3_reset_type reset_level = HNAE3_NONE_RESET; + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct device *dev = &hdev->pdev->dev; - const struct hclge_hw_error *hw_err_lst1, *hw_err_lst2, *hw_err_lst3; - struct hclge_desc desc[2]; - u32 err_sts; + __le32 *desc_data; + u32 status; int ret; - /* read PPP INT sts */ - ret = hclge_cmd_query_error(hdev, &desc[0], cmd, - HCLGE_CMD_FLAG_NEXT, 5, int_type); + /* query all main PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_MPF_RAS_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); if (ret) { - dev_err(dev, "failed(=%d) to query PPP interrupt status\n", - ret); - return -EIO; + dev_err(dev, "query all mpf ras int cmd failed (%d)\n", ret); + return ret; } - /* log error */ - if (cmd == HCLGE_PPP_CMD0_INT_CMD) { - hw_err_lst1 = &hclge_ppp_mpf_int0[0]; - hw_err_lst2 = &hclge_ppp_mpf_int1[0]; - hw_err_lst3 = &hclge_ppp_pf_int[0]; - } else if (cmd == HCLGE_PPP_CMD1_INT_CMD) { - hw_err_lst1 = &hclge_ppp_mpf_int2[0]; - hw_err_lst2 = &hclge_ppp_mpf_int3[0]; - } else { - dev_err(dev, "invalid command(=%d)\n", cmd); - return -EINVAL; + /* log HNS common errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) { + hclge_log_error(dev, "IMP_TCM_ECC_INT_STS", + &hclge_imp_tcm_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } - err_sts = le32_to_cpu(desc[0].data[2]); - if (err_sts) { - hclge_log_error(dev, hw_err_lst1, err_sts); - reset_level = HNAE3_FUNC_RESET; + status = le32_to_cpu(desc[0].data[1]); + if (status) { + hclge_log_error(dev, "CMDQ_MEM_ECC_INT_STS", + &hclge_cmdq_nic_mem_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } - err_sts = le32_to_cpu(desc[0].data[3]); - if (err_sts) { - hclge_log_error(dev, hw_err_lst2, err_sts); - reset_level = HNAE3_FUNC_RESET; + if ((le32_to_cpu(desc[0].data[2])) & BIT(0)) { + dev_warn(dev, "imp_rd_data_poison_err found\n"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); } - if (cmd == HCLGE_PPP_CMD0_INT_CMD) { - err_sts = (le32_to_cpu(desc[0].data[4]) >> 8) & 0x3; - if (err_sts) { - hclge_log_error(dev, hw_err_lst3, err_sts); - reset_level = HNAE3_FUNC_RESET; - } + status = le32_to_cpu(desc[0].data[3]); + if (status) { + hclge_log_error(dev, "TQP_INT_ECC_INT_STS", + &hclge_tqp_int_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } - /* clear PPP INT */ - ret = hclge_cmd_clear_error(hdev, &desc[0], NULL, 0, - HCLGE_CMD_FLAG_NEXT); - if (ret) { - dev_err(dev, "failed(=%d) to clear PPP interrupt status\n", - ret); - return -EIO; + status = le32_to_cpu(desc[0].data[4]); + if (status) { + hclge_log_error(dev, "MSIX_ECC_INT_STS", + &hclge_msix_sram_ecc_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); } - return 0; + /* log SSU(Storage Switch Unit) errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*(desc_data + 2)); + if (status) { + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_0 ssu_ecc_mbit_int[31:0]\n"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 3)) & BIT(0); + if (status) { + dev_warn(dev, "SSU_ECC_MULTI_BIT_INT_1 ssu_ecc_mbit_int[32]\n"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 4)) & HCLGE_SSU_COMMON_ERR_INT_MASK; + if (status) { + hclge_log_error(dev, "SSU_COMMON_ERR_INT", + 
&hclge_ssu_com_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + /* log IGU(Ingress Unit) errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_INT_STS", + &hclge_igu_int[0], status); + + /* log PPP(Programmable Packet Process) errors */ + desc_data = (__le32 *)&desc[4]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST1", + &hclge_ppp_mpf_abnormal_int_st1[0], status); + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPP_MPF_INT_ST3_MASK; + if (status) + hclge_log_error(dev, "PPP_MPF_ABNORMAL_INT_ST3", + &hclge_ppp_mpf_abnormal_int_st3[0], status); + + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 1)); + if (status) { + dev_warn(dev, "PPU_MPF_ABNORMAL_INT_ST1 %s found\n", + "rpu_rx_pkt_ecc_mbit_err"); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 2)); + if (status) { + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST2", + &hclge_ppu_mpf_abnormal_int_st2[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 3)) & HCLGE_PPU_MPF_INT_ST3_MASK; + if (status) { + hclge_log_error(dev, "PPU_MPF_ABNORMAL_INT_ST3", + &hclge_ppu_mpf_abnormal_int_st3[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + /* log TM(Traffic Manager) errors */ + desc_data = (__le32 *)&desc[6]; + status = le32_to_cpu(*desc_data); + if (status) { + hclge_log_error(dev, "TM_SCH_RINT", + &hclge_tm_sch_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + /* log QCN(Quantized Congestion Control) errors */ + desc_data = (__le32 *)&desc[7]; + status = le32_to_cpu(*desc_data) & HCLGE_QCN_FIFO_INT_MASK; + if (status) { + hclge_log_error(dev, "QCN_FIFO_RINT", + &hclge_qcn_fifo_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + status = le32_to_cpu(*(desc_data + 1)) & HCLGE_QCN_ECC_INT_MASK; + if (status) { + hclge_log_error(dev, "QCN_ECC_RINT", + &hclge_qcn_ecc_rint[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + /* log NCSI errors */ + desc_data = (__le32 *)&desc[9]; + status = le32_to_cpu(*desc_data) & HCLGE_NCSI_ECC_INT_MASK; + if (status) { + hclge_log_error(dev, "NCSI_ECC_INT_RPT", + &hclge_ncsi_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_CORE_RESET); + } + + /* clear all main PF RAS errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all mpf ras int cmd failed (%d)\n", ret); + + return ret; } -static void hclge_process_ppp_error(struct hclge_dev *hdev, - enum hclge_err_int_type int_type) +/* hclge_handle_pf_ras_error: handle all PF RAS errors + * @hdev: pointer to struct hclge_dev + * @desc: descriptor for describing the command + * @num: number of extended command structures + * + * This function handles all the PF RAS errors in the + * hw register/s using command. 
+ */ +static int hclge_handle_pf_ras_error(struct hclge_dev *hdev, + struct hclge_desc *desc, + int num) { + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; struct device *dev = &hdev->pdev->dev; + __le32 *desc_data; + u32 status; int ret; - /* read PPP INT0,1 sts */ - ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD0_INT_CMD, - int_type); - if (ret < 0) { - dev_err(dev, "failed(=%d) to clear PPP interrupt 0,1 status\n", - ret); - return; + /* query all PF RAS errors */ + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_PF_RAS_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) { + dev_err(dev, "query all pf ras int cmd failed (%d)\n", ret); + return ret; } - /* read err PPP INT2,3 sts */ - ret = hclge_log_and_clear_ppp_error(hdev, HCLGE_PPP_CMD1_INT_CMD, - int_type); - if (ret < 0) - dev_err(dev, "failed(=%d) to clear PPP interrupt 2,3 status\n", - ret); + /* log SSU(Storage Switch Unit) errors */ + status = le32_to_cpu(desc[0].data[0]); + if (status) { + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_err_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + status = le32_to_cpu(desc[0].data[1]); + if (status) { + hclge_log_error(dev, "SSU_FIFO_OVERFLOW_INT", + &hclge_ssu_fifo_overflow_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + status = le32_to_cpu(desc[0].data[2]); + if (status) { + hclge_log_error(dev, "SSU_ETS_TCG_INT", + &hclge_ssu_ets_tcg_int[0], status); + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + } + + /* log IGU(Ingress Unit) EGU(Egress Unit) TNL errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data) & HCLGE_IGU_EGU_TNL_INT_MASK; + if (status) + hclge_log_error(dev, "IGU_EGU_TNL_INT_STS", + &hclge_igu_egu_tnl_int[0], status); + + /* clear all PF RAS errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], num); + if (ret) + dev_err(dev, "clear all pf ras int cmd failed (%d)\n", ret); + + return ret; } -static void hclge_process_tm_sch_error(struct hclge_dev *hdev) +static int hclge_handle_all_ras_errors(struct hclge_dev *hdev) { struct device *dev = &hdev->pdev->dev; - const struct hclge_tm_sch_ecc_info *tm_sch_ecc_info; - struct hclge_desc desc; - u32 ecc_info; - u8 module_no; - u8 ram_no; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc desc_bd; + struct hclge_desc *desc; int ret; - /* read TM scheduler errors */ - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_MBIT_ECC_INFO_CMD, 0, 0, 0); + /* query the number of registers in the RAS int status */ + hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_RAS_INT_STS_BD_NUM, + true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); if (ret) { - dev_err(dev, "failed(%d) to read SCH mbit ECC err info\n", ret); - return; + dev_err(dev, "fail(%d) to query ras int status bd num\n", ret); + return ret; } - ecc_info = le32_to_cpu(desc.data[0]); + mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + pf_bd_num = le32_to_cpu(desc_bd.data[1]); + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_CMD, 0, 0, 0); + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + return -ENOMEM; + + /* handle all main PF RAS errors */ + ret = hclge_handle_mpf_ras_error(hdev, desc, mpf_bd_num); if (ret) { - dev_err(dev, "failed(%d) to read SCH ECC err status\n", ret); - 
return; + kfree(desc); + return ret; } + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + + /* handle all PF RAS errors */ + ret = hclge_handle_pf_ras_error(hdev, desc, pf_bd_num); + kfree(desc); - /* log TM scheduler errors */ - if (le32_to_cpu(desc.data[0])) { - hclge_log_error(dev, &hclge_tm_sch_err_int[0], - le32_to_cpu(desc.data[0])); - if (le32_to_cpu(desc.data[0]) & 0x2) { - module_no = (ecc_info >> 20) & 0xF; - ram_no = (ecc_info >> 16) & 0xF; - tm_sch_ecc_info = - &hclge_tm_sch_ecc_err[module_no][ram_no]; - dev_warn(dev, "ecc err module:ram=%s\n", - tm_sch_ecc_info->name); - dev_warn(dev, "ecc memory address = 0x%x\n", - ecc_info & 0xFFFF); + return ret; +} + +static int hclge_log_rocee_ovf_error(struct hclge_dev *hdev) +{ + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + int ret; + + /* read overflow error status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_ROCEE_PF_RAS_INT_CMD, + 0, 0, 0); + if (ret) { + dev_err(dev, "failed(%d) to query ROCEE OVF error sts\n", ret); + return ret; + } + + /* log overflow error */ + if (le32_to_cpu(desc[0].data[0]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + const struct hclge_hw_error *err; + u32 err_sts; + + err = &hclge_rocee_qmm_ovf_err_int[0]; + err_sts = HCLGE_ROCEE_OVF_ERR_TYPE_MASK & + le32_to_cpu(desc[0].data[0]); + while (err->msg) { + if (err->int_msk == err_sts) { + dev_warn(dev, "%s [error status=0x%x] found\n", + err->msg, + le32_to_cpu(desc[0].data[0])); + break; + } + err++; } } - /* clear TM scheduler errors */ - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to clear TM SCH error status\n", ret); - return; + if (le32_to_cpu(desc[0].data[1]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_warn(dev, "ROCEE TSP OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[1])); } - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_CE, 0, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to read SCH CE status\n", ret); - return; + if (le32_to_cpu(desc[0].data[2]) & HCLGE_ROCEE_OVF_ERR_INT_MASK) { + dev_warn(dev, "ROCEE SCC OVF [error status=0x%x] found\n", + le32_to_cpu(desc[0].data[2])); } - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + return 0; +} + +static int hclge_log_and_clear_rocee_ras_error(struct hclge_dev *hdev) +{ + enum hnae3_reset_type reset_type = HNAE3_FUNC_RESET; + struct hnae3_ae_dev *ae_dev = hdev->ae_dev; + struct device *dev = &hdev->pdev->dev; + struct hclge_desc desc[2]; + unsigned int status; + int ret; + + /* read RAS error interrupt status */ + ret = hclge_cmd_query_error(hdev, &desc[0], + HCLGE_QUERY_CLEAR_ROCEE_RAS_INT, + 0, 0, 0); if (ret) { - dev_err(dev, "failed(%d) to clear TM SCH CE status\n", ret); - return; + dev_err(dev, "failed(%d) to query ROCEE RAS INT SRC\n", ret); + /* reset everything for now */ + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + return ret; } - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_NFE, 0, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to read SCH NFE status\n", ret); - return; + status = le32_to_cpu(desc[0].data[0]); + + if (status & HCLGE_ROCEE_RERR_INT_MASK) + dev_warn(dev, "ROCEE RAS AXI rresp error\n"); + + if (status & HCLGE_ROCEE_BERR_INT_MASK) + dev_warn(dev, "ROCEE RAS AXI bresp error\n"); + + if (status & HCLGE_ROCEE_ECC_INT_MASK) { + dev_warn(dev, "ROCEE RAS 2bit ECC error\n"); + reset_type = HNAE3_GLOBAL_RESET; } - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to clear TM SCH NFE 
status\n", ret); - return; + if (status & HCLGE_ROCEE_OVF_INT_MASK) { + ret = hclge_log_rocee_ovf_error(hdev); + if (ret) { + dev_err(dev, "failed(%d) to process ovf error\n", ret); + /* reset everything for now */ + HCLGE_SET_DEFAULT_RESET_REQUEST(HNAE3_GLOBAL_RESET); + return ret; + } } - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_SCH_ECC_ERR_RINT_FE, 0, 0, 0); + /* clear error status */ + hclge_cmd_reuse_desc(&desc[0], false); + ret = hclge_cmd_send(&hdev->hw, &desc[0], 1); if (ret) { - dev_err(dev, "failed(%d) to read SCH FE status\n", ret); - return; + dev_err(dev, "failed(%d) to clear ROCEE RAS error\n", ret); + /* reset everything for now */ + reset_type = HNAE3_GLOBAL_RESET; } - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); - if (ret) - dev_err(dev, "failed(%d) to clear TM SCH FE status\n", ret); + HCLGE_SET_DEFAULT_RESET_REQUEST(reset_type); + + return ret; } -static void hclge_process_tm_qcn_error(struct hclge_dev *hdev) +static int hclge_config_rocee_ras_interrupt(struct hclge_dev *hdev, bool en) { struct device *dev = &hdev->pdev->dev; struct hclge_desc desc; int ret; - /* read QCN errors */ - ret = hclge_cmd_query_error(hdev, &desc, - HCLGE_TM_QCN_MEM_INT_INFO_CMD, 0, 0, 0); - if (ret) { - dev_err(dev, "failed(%d) to read QCN ECC err status\n", ret); - return; - } + if (hdev->pdev->revision < 0x21 || !hnae3_dev_roce_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_CONFIG_ROCEE_RAS_INT_EN, false); + if (en) { + /* enable ROCEE hw error interrupts */ + desc.data[0] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN); + desc.data[1] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN); - /* log QCN errors */ - if (le32_to_cpu(desc.data[0])) - hclge_log_error(dev, &hclge_qcn_ecc_err_int[0], - le32_to_cpu(desc.data[0])); + hclge_log_and_clear_rocee_ras_error(hdev); + } + desc.data[2] = cpu_to_le32(HCLGE_ROCEE_RAS_NFE_INT_EN_MASK); + desc.data[3] = cpu_to_le32(HCLGE_ROCEE_RAS_CE_INT_EN_MASK); - /* clear QCN errors */ - ret = hclge_cmd_clear_error(hdev, &desc, NULL, 0, 0); + ret = hclge_cmd_send(&hdev->hw, &desc, 1); if (ret) - dev_err(dev, "failed(%d) to clear QCN error status\n", ret); + dev_err(dev, "failed(%d) to config ROCEE RAS interrupt\n", ret); + + return ret; } -static void hclge_process_tm_error(struct hclge_dev *hdev, - enum hclge_err_int_type type) +static int hclge_handle_rocee_ras_error(struct hnae3_ae_dev *ae_dev) { - hclge_process_tm_sch_error(hdev); - hclge_process_tm_qcn_error(hdev); + struct hclge_dev *hdev = ae_dev->priv; + + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + hdev->pdev->revision < 0x21) + return HNAE3_NONE_RESET; + + return hclge_log_and_clear_rocee_ras_error(hdev); } static const struct hclge_hw_blk hw_blk[] = { - { .msk = BIT(0), .name = "IGU_EGU", - .enable_error = hclge_enable_igu_egu_error, - .process_error = hclge_process_igu_egu_error, }, - { .msk = BIT(5), .name = "COMMON", - .enable_error = hclge_enable_common_error, - .process_error = hclge_process_common_error, }, - { .msk = BIT(4), .name = "TM", - .enable_error = hclge_enable_tm_hw_error, - .process_error = hclge_process_tm_error, }, - { .msk = BIT(1), .name = "PPP", - .enable_error = hclge_enable_ppp_error, - .process_error = hclge_process_ppp_error, }, + { + .msk = BIT(0), .name = "IGU_EGU", + .config_err_int = hclge_config_igu_egu_hw_err_int, + }, + { + .msk = BIT(1), .name = "PPP", + .config_err_int = hclge_config_ppp_hw_err_int, + }, + { + .msk = BIT(2), .name = "SSU", + .config_err_int = hclge_config_ssu_hw_err_int, + }, + { + .msk = BIT(3), .name = 
"PPU", + .config_err_int = hclge_config_ppu_hw_err_int, + }, + { + .msk = BIT(4), .name = "TM", + .config_err_int = hclge_config_tm_hw_err_int, + }, + { + .msk = BIT(5), .name = "COMMON", + .config_err_int = hclge_config_common_hw_err_int, + }, + { + .msk = BIT(8), .name = "MAC", + .config_err_int = hclge_config_mac_err_int, + }, { /* sentinel */ } }; int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state) { + const struct hclge_hw_blk *module = hw_blk; struct device *dev = &hdev->pdev->dev; int ret = 0; - int i = 0; - while (hw_blk[i].name) { - if (!hw_blk[i].enable_error) { - i++; - continue; + while (module->name) { + if (module->config_err_int) { + ret = module->config_err_int(hdev, state); + if (ret) + return ret; } - ret = hw_blk[i].enable_error(hdev, state); - if (ret) { - dev_err(dev, "fail(%d) to en/disable err int\n", ret); - return ret; - } - i++; + module++; } + ret = hclge_config_rocee_ras_interrupt(hdev, state); + if (ret) + dev_err(dev, "fail(%d) to configure ROCEE err int\n", ret); + return ret; } -pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev) +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; struct device *dev = &hdev->pdev->dev; - u32 sts, val; - int i = 0; - - sts = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); - - /* Processing Non-fatal errors */ - if (sts & HCLGE_RAS_REG_NFE_MASK) { - val = (sts >> HCLGE_RAS_REG_NFE_SHIFT) & 0xFF; - i = 0; - while (hw_blk[i].name) { - if (!(hw_blk[i].msk & val)) { - i++; - continue; - } - dev_warn(dev, "%s ras non-fatal error identified\n", - hw_blk[i].name); - if (hw_blk[i].process_error) - hw_blk[i].process_error(hdev, - HCLGE_ERR_INT_RAS_NFE); - i++; - } + u32 status; + + status = hclge_read_dev(&hdev->hw, HCLGE_RAS_PF_OTHER_INT_STS_REG); + + /* Handling Non-fatal HNS RAS errors */ + if (status & HCLGE_RAS_REG_NFE_MASK) { + dev_warn(dev, + "HNS Non-Fatal RAS error(status=0x%x) identified\n", + status); + hclge_handle_all_ras_errors(hdev); + } else { + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) || + hdev->pdev->revision < 0x21) + return PCI_ERS_RESULT_RECOVERED; + } + + if (status & HCLGE_RAS_REG_ROCEE_ERR_MASK) { + dev_warn(dev, "ROCEE uncorrected RAS error identified\n"); + hclge_handle_rocee_ras_error(ae_dev); + } + + if (status & HCLGE_RAS_REG_NFE_MASK || + status & HCLGE_RAS_REG_ROCEE_ERR_MASK) + return PCI_ERS_RESULT_NEED_RESET; + + return PCI_ERS_RESULT_RECOVERED; +} + +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests) +{ + struct device *dev = &hdev->pdev->dev; + u32 mpf_bd_num, pf_bd_num, bd_num; + struct hclge_desc desc_bd; + struct hclge_desc *desc; + __le32 *desc_data; + int ret = 0; + u32 status; + + /* set default handling */ + set_bit(HNAE3_FUNC_RESET, reset_requests); + + /* query the number of bds for the MSIx int status */ + hclge_cmd_setup_basic_desc(&desc_bd, HCLGE_QUERY_MSIX_INT_STS_BD_NUM, + true); + ret = hclge_cmd_send(&hdev->hw, &desc_bd, 1); + if (ret) { + dev_err(dev, "fail(%d) to query msix int status bd num\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + return ret; + } + + mpf_bd_num = le32_to_cpu(desc_bd.data[0]); + pf_bd_num = le32_to_cpu(desc_bd.data[1]); + bd_num = max_t(u32, mpf_bd_num, pf_bd_num); + + desc = kcalloc(bd_num, sizeof(struct hclge_desc), GFP_KERNEL); + if (!desc) + goto out; + + /* query all main PF MSIx errors */ + hclge_cmd_setup_basic_desc(&desc[0], 
HCLGE_QUERY_CLEAR_ALL_MPF_MSIX_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "query all mpf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + goto msi_error; + } + + /* log MAC errors */ + desc_data = (__le32 *)&desc[1]; + status = le32_to_cpu(*desc_data); + if (status) { + hclge_log_error(dev, "MAC_AFIFO_TNL_INT_R", + &hclge_mac_afifo_tnl_int[0], status); + set_bit(HNAE3_GLOBAL_RESET, reset_requests); } - return PCI_ERS_RESULT_NEED_RESET; + /* log PPU(RCB) errors */ + desc_data = (__le32 *)&desc[5]; + status = le32_to_cpu(*(desc_data + 2)) & + HCLGE_PPU_MPF_INT_ST2_MSIX_MASK; + if (status) { + dev_warn(dev, + "PPU_MPF_ABNORMAL_INT_ST2[28:29], err_status(0x%x)\n", + status); + set_bit(HNAE3_CORE_RESET, reset_requests); + } + + /* clear all main PF MSIx errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], mpf_bd_num); + if (ret) { + dev_err(dev, "clear all mpf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + goto msi_error; + } + + /* query all PF MSIx errors */ + memset(desc, 0, bd_num * sizeof(struct hclge_desc)); + hclge_cmd_setup_basic_desc(&desc[0], HCLGE_QUERY_CLEAR_ALL_PF_MSIX_INT, + true); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "query all pf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + goto msi_error; + } + + /* log SSU PF errors */ + status = le32_to_cpu(desc[0].data[0]) & HCLGE_SSU_PORT_INT_MSIX_MASK; + if (status) { + hclge_log_error(dev, "SSU_PORT_BASED_ERR_INT", + &hclge_ssu_port_based_pf_int[0], status); + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + + /* read and log PPP PF errors */ + desc_data = (__le32 *)&desc[2]; + status = le32_to_cpu(*desc_data); + if (status) + hclge_log_error(dev, "PPP_PF_ABNORMAL_INT_ST0", + &hclge_ppp_pf_abnormal_int[0], status); + + /* PPU(RCB) PF errors */ + desc_data = (__le32 *)&desc[3]; + status = le32_to_cpu(*desc_data) & HCLGE_PPU_PF_INT_MSIX_MASK; + if (status) + hclge_log_error(dev, "PPU_PF_ABNORMAL_INT_ST", + &hclge_ppu_pf_abnormal_int[0], status); + + /* clear all PF MSIx errors */ + hclge_cmd_reuse_desc(&desc[0], false); + desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT); + + ret = hclge_cmd_send(&hdev->hw, &desc[0], pf_bd_num); + if (ret) { + dev_err(dev, "clear all pf msix int cmd failed (%d)\n", + ret); + /* reset everything for now */ + set_bit(HNAE3_GLOBAL_RESET, reset_requests); + } + +msi_error: + kfree(desc); +out: + return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h index e0e3b5861495..51a7d4eb066a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_err.h @@ -7,9 +7,11 @@ #include "hclge_main.h" #define HCLGE_RAS_PF_OTHER_INT_STS_REG 0x20B00 -#define HCLGE_RAS_REG_FE_MASK 0xFF #define HCLGE_RAS_REG_NFE_MASK 0xFF00 -#define HCLGE_RAS_REG_NFE_SHIFT 8 +#define HCLGE_RAS_REG_ROCEE_ERR_MASK 0x3000000 + +#define HCLGE_VECTOR0_PF_OTHER_INT_STS_REG 0x20800 +#define HCLGE_VECTOR0_REG_MSIX_MASK 0x1FF00 #define HCLGE_IMP_TCM_ECC_ERR_INT_EN 0xFFFF0000 #define 
HCLGE_IMP_TCM_ECC_ERR_INT_EN_MASK 0xFFFF0000 @@ -23,6 +25,8 @@ #define HCLGE_IMP_RD_POISON_ERR_INT_EN_MASK 0x0100 #define HCLGE_TQP_ECC_ERR_INT_EN 0x0FFF #define HCLGE_TQP_ECC_ERR_INT_EN_MASK 0x0FFF +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN_MASK 0x0F000000 +#define HCLGE_MSIX_SRAM_ECC_ERR_INT_EN 0x0F000000 #define HCLGE_IGU_ERR_INT_EN 0x0000066F #define HCLGE_IGU_ERR_INT_EN_MASK 0x000F #define HCLGE_IGU_TNL_ERR_INT_EN 0x0002AABF @@ -41,21 +45,55 @@ #define HCLGE_TM_QCN_MEM_ERR_INT_EN 0xFFFFFF #define HCLGE_NCSI_ERR_INT_EN 0x3 #define HCLGE_NCSI_ERR_INT_TYPE 0x9 +#define HCLGE_MAC_COMMON_ERR_INT_EN GENMASK(7, 0) +#define HCLGE_MAC_COMMON_ERR_INT_EN_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT0_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT1_EN_MASK GENMASK(31, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN_MASK 0x3FFF3FFF +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT2_EN2_MASK 0xB +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN GENMASK(7, 0) +#define HCLGE_PPU_MPF_ABNORMAL_INT3_EN_MASK GENMASK(23, 16) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN GENMASK(5, 0) +#define HCLGE_PPU_PF_ABNORMAL_INT_EN_MASK GENMASK(5, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_1BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN GENMASK(31, 0) +#define HCLGE_SSU_MULTI_BIT_ECC_ERR_INT_EN_MASK GENMASK(31, 0) +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN 0x0101 +#define HCLGE_SSU_BIT32_ECC_ERR_INT_EN_MASK 0x0101 +#define HCLGE_SSU_COMMON_INT_EN GENMASK(9, 0) +#define HCLGE_SSU_COMMON_INT_EN_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN 0x0BFF +#define HCLGE_SSU_PORT_BASED_ERR_INT_EN_MASK 0x0BFF0000 +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN GENMASK(23, 0) +#define HCLGE_SSU_FIFO_OVERFLOW_ERR_INT_EN_MASK GENMASK(23, 0) + +#define HCLGE_SSU_COMMON_ERR_INT_MASK GENMASK(9, 0) +#define HCLGE_SSU_PORT_INT_MSIX_MASK 0x7BFF +#define HCLGE_IGU_INT_MASK GENMASK(3, 0) +#define HCLGE_IGU_EGU_TNL_INT_MASK GENMASK(5, 0) +#define HCLGE_PPP_MPF_INT_ST3_MASK GENMASK(5, 0) +#define HCLGE_PPU_MPF_INT_ST3_MASK GENMASK(7, 0) +#define HCLGE_PPU_MPF_INT_ST2_MSIX_MASK GENMASK(29, 28) +#define HCLGE_PPU_PF_INT_MSIX_MASK 0x27 +#define HCLGE_QCN_FIFO_INT_MASK GENMASK(17, 0) +#define HCLGE_QCN_ECC_INT_MASK GENMASK(21, 0) +#define HCLGE_NCSI_ECC_INT_MASK GENMASK(1, 0) -#define HCLGE_IMP_TCM_ECC_INT_MASK 0xFFFF -#define HCLGE_IMP_ITCM4_ECC_INT_MASK 0x3 -#define HCLGE_CMDQ_ECC_INT_MASK 0xFFFF -#define HCLGE_CMDQ_ROC_ECC_INT_SHIFT 16 -#define HCLGE_TQP_ECC_INT_MASK 0xFFF -#define HCLGE_TQP_ECC_INT_SHIFT 16 -#define HCLGE_IMP_TCM_ECC_CLR_MASK 0xFFFF -#define HCLGE_IMP_ITCM4_ECC_CLR_MASK 0x3 -#define HCLGE_CMDQ_NIC_ECC_CLR_MASK 0xFFFF -#define HCLGE_CMDQ_ROCEE_ECC_CLR_MASK 0xFFFF0000 -#define HCLGE_TQP_IMP_ERR_CLR_MASK 0x0FFF0001 -#define HCLGE_IGU_COM_INT_MASK 0xF -#define HCLGE_IGU_EGU_TNL_INT_MASK 0x3F -#define HCLGE_PPP_PF_INT_MASK 0x100 +#define HCLGE_ROCEE_RAS_NFE_INT_EN 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN 0x1 +#define HCLGE_ROCEE_RAS_NFE_INT_EN_MASK 0xF +#define HCLGE_ROCEE_RAS_CE_INT_EN_MASK 0x1 +#define HCLGE_ROCEE_RERR_INT_MASK BIT(0) +#define HCLGE_ROCEE_BERR_INT_MASK BIT(1) +#define HCLGE_ROCEE_ECC_INT_MASK BIT(2) +#define HCLGE_ROCEE_OVF_INT_MASK BIT(3) +#define HCLGE_ROCEE_OVF_ERR_INT_MASK 0x10000 +#define HCLGE_ROCEE_OVF_ERR_TYPE_MASK 0x3F enum hclge_err_int_type { 
HCLGE_ERR_INT_MSIX = 0, @@ -67,9 +105,7 @@ enum hclge_err_int_type { struct hclge_hw_blk { u32 msk; const char *name; - int (*enable_error)(struct hclge_dev *hdev, bool en); - void (*process_error)(struct hclge_dev *hdev, - enum hclge_err_int_type type); + int (*config_err_int)(struct hclge_dev *hdev, bool en); }; struct hclge_hw_error { @@ -78,6 +114,7 @@ struct hclge_hw_error { }; int hclge_hw_error_set_state(struct hclge_dev *hdev, bool state); -int hclge_enable_tm_hw_error(struct hclge_dev *hdev, bool en); -pci_ers_result_t hclge_process_ras_hw_error(struct hnae3_ae_dev *ae_dev); +pci_ers_result_t hclge_handle_hw_ras_error(struct hnae3_ae_dev *ae_dev); +int hclge_handle_hw_msix_error(struct hclge_dev *hdev, + unsigned long *reset_requests); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index ffdd96020860..f7637c08bb3a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -26,7 +26,9 @@ #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset)))) #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f)) -static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu); +#define HCLGE_BUF_SIZE_UNIT 256 + +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps); static int hclge_init_vlan_config(struct hclge_dev *hdev); static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev); static int hclge_set_umv_space(struct hclge_dev *hdev, u16 space_size, @@ -48,6 +50,62 @@ static const struct pci_device_id ae_algo_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algo_pci_tbl); +static const u32 cmdq_reg_addr_list[] = {HCLGE_CMDQ_TX_ADDR_L_REG, + HCLGE_CMDQ_TX_ADDR_H_REG, + HCLGE_CMDQ_TX_DEPTH_REG, + HCLGE_CMDQ_TX_TAIL_REG, + HCLGE_CMDQ_TX_HEAD_REG, + HCLGE_CMDQ_RX_ADDR_L_REG, + HCLGE_CMDQ_RX_ADDR_H_REG, + HCLGE_CMDQ_RX_DEPTH_REG, + HCLGE_CMDQ_RX_TAIL_REG, + HCLGE_CMDQ_RX_HEAD_REG, + HCLGE_VECTOR0_CMDQ_SRC_REG, + HCLGE_CMDQ_INTR_STS_REG, + HCLGE_CMDQ_INTR_EN_REG, + HCLGE_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGE_MISC_VECTOR_REG_BASE, + HCLGE_VECTOR0_OTER_EN_REG, + HCLGE_MISC_RESET_STS_REG, + HCLGE_MISC_VECTOR_INT_STS, + HCLGE_GLOBAL_RESET_REG, + HCLGE_FUN_RST_ING, + HCLGE_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGE_RING_RX_ADDR_L_REG, + HCLGE_RING_RX_ADDR_H_REG, + HCLGE_RING_RX_BD_NUM_REG, + HCLGE_RING_RX_BD_LENGTH_REG, + HCLGE_RING_RX_MERGE_EN_REG, + HCLGE_RING_RX_TAIL_REG, + HCLGE_RING_RX_HEAD_REG, + HCLGE_RING_RX_FBD_NUM_REG, + HCLGE_RING_RX_OFFSET_REG, + HCLGE_RING_RX_FBD_OFFSET_REG, + HCLGE_RING_RX_STASH_REG, + HCLGE_RING_RX_BD_ERR_REG, + HCLGE_RING_TX_ADDR_L_REG, + HCLGE_RING_TX_ADDR_H_REG, + HCLGE_RING_TX_BD_NUM_REG, + HCLGE_RING_TX_PRIORITY_REG, + HCLGE_RING_TX_TC_REG, + HCLGE_RING_TX_MERGE_EN_REG, + HCLGE_RING_TX_TAIL_REG, + HCLGE_RING_TX_HEAD_REG, + HCLGE_RING_TX_FBD_NUM_REG, + HCLGE_RING_TX_OFFSET_REG, + HCLGE_RING_TX_EBD_NUM_REG, + HCLGE_RING_TX_EBD_OFFSET_REG, + HCLGE_RING_TX_BD_ERR_REG, + HCLGE_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGE_TQP_INTR_CTRL_REG, + HCLGE_TQP_INTR_GL0_REG, + HCLGE_TQP_INTR_GL1_REG, + HCLGE_TQP_INTR_GL2_REG, + HCLGE_TQP_INTR_RL_REG}; + static const char hns3_nic_test_strs[][ETH_GSTRING_LEN] = { "App Loopback test", "Serdes serial Loopback test", @@ -631,6 +689,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev) hdev->num_tqps = __le16_to_cpu(req->tqp_num); hdev->pkt_buf_size = 
__le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S; + if (req->tx_buf_size) + hdev->tx_buf_size = + __le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF; + + hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT); + + if (req->dv_buf_size) + hdev->dv_buf_size = + __le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S; + else + hdev->dv_buf_size = HCLGE_DEFAULT_DV; + + hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT); + if (hnae3_dev_roce_supported(hdev)) { hdev->roce_base_msix_offset = hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee), @@ -886,7 +960,7 @@ static int hclge_configure(struct hclge_dev *hdev) hdev->pfc_max = hdev->tc_max; } - hdev->tm_info.num_tc = hdev->tc_max; + hdev->tm_info.num_tc = 1; /* Currently we do not support non-contiguous tc */ for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -921,6 +995,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min, return hclge_cmd_send(&hdev->hw, &desc, 1); } +static int hclge_config_gro(struct hclge_dev *hdev, bool en) +{ + struct hclge_cfg_gro_status_cmd *req; + struct hclge_desc desc; + int ret; + + if (!hnae3_dev_gro_supported(hdev)) + return 0; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false); + req = (struct hclge_cfg_gro_status_cmd *)desc.data; + + req->gro_en = cpu_to_le16(en ? 1 : 0); + + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "GRO hardware config cmd failed, ret = %d\n", ret); + + return ret; +} + static int hclge_alloc_tqps(struct hclge_dev *hdev) { struct hclge_tqp *tqp; @@ -1144,6 +1240,7 @@ static int hclge_alloc_vport(struct hclge_dev *hdev) for (i = 0; i < num_vport; i++) { vport->back = hdev; vport->vport_id = i; + vport->mps = HCLGE_MAC_DEFAULT_FRAME; if (i == 0) ret = hclge_vport_setup(vport, tqp_main_vport); @@ -1289,40 +1386,51 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev, { u32 shared_buf_min, shared_buf_tc, shared_std; int tc_num, pfc_enable_num; - u32 shared_buf; + u32 shared_buf, aligned_mps; u32 rx_priv; int i; tc_num = hclge_get_tc_num(hdev); pfc_enable_num = hclge_get_pfc_enalbe_num(hdev); + aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT); if (hnae3_dev_dcb_supported(hdev)) - shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV; + shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size; else - shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV; + shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF + + hdev->dv_buf_size; - shared_buf_tc = pfc_enable_num * hdev->mps + - (tc_num - pfc_enable_num) * hdev->mps / 2 + - hdev->mps; - shared_std = max_t(u32, shared_buf_min, shared_buf_tc); + shared_buf_tc = pfc_enable_num * aligned_mps + + (tc_num - pfc_enable_num) * aligned_mps / 2 + + aligned_mps; + shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc), + HCLGE_BUF_SIZE_UNIT); rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc); - if (rx_all <= rx_priv + shared_std) + if (rx_all < rx_priv + shared_std) return false; - shared_buf = rx_all - rx_priv; + shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT); buf_alloc->s_buf.buf_size = shared_buf; - buf_alloc->s_buf.self.high = shared_buf; - buf_alloc->s_buf.self.low = 2 * hdev->mps; + if (hnae3_dev_dcb_supported(hdev)) { + buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size; + buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high + - roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); + } else { + buf_alloc->s_buf.self.high = aligned_mps + +
HCLGE_NON_DCB_ADDITIONAL_BUF; + buf_alloc->s_buf.self.low = + roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT); + } for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { if ((hdev->hw_tc_map & BIT(i)) && (hdev->tm_info.hw_pfc_map & BIT(i))) { - buf_alloc->s_buf.tc_thrd[i].low = hdev->mps; - buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps; + buf_alloc->s_buf.tc_thrd[i].low = aligned_mps; + buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps; } else { buf_alloc->s_buf.tc_thrd[i].low = 0; - buf_alloc->s_buf.tc_thrd[i].high = hdev->mps; + buf_alloc->s_buf.tc_thrd[i].high = aligned_mps; } } @@ -1340,11 +1448,11 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, for (i = 0; i < HCLGE_MAX_TC_NUM; i++) { struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i]; - if (total_size < HCLGE_DEFAULT_TX_BUF) + if (total_size < hdev->tx_buf_size) return -ENOMEM; if (hdev->hw_tc_map & BIT(i)) - priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF; + priv->tx_buf_size = hdev->tx_buf_size; else priv->tx_buf_size = 0; @@ -1362,7 +1470,6 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev, static int hclge_rx_buffer_calc(struct hclge_dev *hdev, struct hclge_pkt_buf_alloc *buf_alloc) { -#define HCLGE_BUF_SIZE_UNIT 128 u32 rx_all = hdev->pkt_buf_size, aligned_mps; int no_pfc_priv_num, pfc_priv_num; struct hclge_priv_buf *priv; @@ -1388,13 +1495,16 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { priv->wl.low = aligned_mps; - priv->wl.high = priv->wl.low + aligned_mps; + priv->wl.high = + roundup(priv->wl.low + aligned_mps, + HCLGE_BUF_SIZE_UNIT); priv->buf_size = priv->wl.high + - HCLGE_DEFAULT_DV; + hdev->dv_buf_size; } else { priv->wl.low = 0; priv->wl.high = 2 * aligned_mps; - priv->buf_size = priv->wl.high; + priv->buf_size = priv->wl.high + + hdev->dv_buf_size; } } else { priv->enable = 0; @@ -1424,13 +1534,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev, priv->enable = 1; if (hdev->tm_info.hw_pfc_map & BIT(i)) { - priv->wl.low = 128; + priv->wl.low = 256; priv->wl.high = priv->wl.low + aligned_mps; - priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV; + priv->buf_size = priv->wl.high + hdev->dv_buf_size; } else { priv->wl.low = 0; priv->wl.high = aligned_mps; - priv->buf_size = priv->wl.high; + priv->buf_size = priv->wl.high + hdev->dv_buf_size; } } @@ -1873,37 +1983,6 @@ static int hclge_cfg_mac_speed_dup_h(struct hnae3_handle *handle, int speed, return hclge_cfg_mac_speed_dup(hdev, speed, duplex); } -static int hclge_query_mac_an_speed_dup(struct hclge_dev *hdev, int *speed, - u8 *duplex) -{ - struct hclge_query_an_speed_dup_cmd *req; - struct hclge_desc desc; - int speed_tmp; - int ret; - - req = (struct hclge_query_an_speed_dup_cmd *)desc.data; - - hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_AN_RESULT, true); - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/autoneg/duplex query cmd failed %d\n", - ret); - return ret; - } - - *duplex = hnae3_get_bit(req->an_syn_dup_speed, HCLGE_QUERY_DUPLEX_B); - speed_tmp = hnae3_get_field(req->an_syn_dup_speed, HCLGE_QUERY_SPEED_M, - HCLGE_QUERY_SPEED_S); - - ret = hclge_parse_speed(speed_tmp, speed); - if (ret) - dev_err(&hdev->pdev->dev, - "could not parse speed(=%d), %d\n", speed_tmp, ret); - - return ret; -} - static int hclge_set_autoneg_en(struct hclge_dev *hdev, bool enable) { struct hclge_config_auto_neg_cmd *req; @@ -1947,12 +2026,10 @@ static int hclge_get_autoneg(struct hnae3_handle *handle) static int hclge_mac_init(struct hclge_dev 
*hdev) { - struct hnae3_handle *handle = &hdev->vport[0].nic; - struct net_device *netdev = handle->kinfo.netdev; struct hclge_mac *mac = &hdev->hw.mac; - int mtu; int ret; + hdev->support_sfp_query = true; hdev->hw.mac.duplex = HCLGE_MAC_FULL; ret = hclge_cfg_mac_speed_dup_hw(hdev, hdev->hw.mac.speed, hdev->hw.mac.duplex); @@ -1964,15 +2041,16 @@ static int hclge_mac_init(struct hclge_dev *hdev) mac->link = 0; - if (netdev) - mtu = netdev->mtu; - else - mtu = ETH_DATA_LEN; + ret = hclge_set_mac_mtu(hdev, hdev->mps); + if (ret) { + dev_err(&hdev->pdev->dev, "set mtu failed ret=%d\n", ret); + return ret; + } - ret = hclge_set_mtu(handle, mtu); + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, - "set mtu failed ret=%d\n", ret); + "allocate buffer fail, ret=%d\n", ret); return ret; } @@ -2061,34 +2139,58 @@ static void hclge_update_link_status(struct hclge_dev *hdev) } } +static int hclge_get_sfp_speed(struct hclge_dev *hdev, u32 *speed) +{ + struct hclge_sfp_speed_cmd *resp = NULL; + struct hclge_desc desc; + int ret; + + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_SFP_GET_SPEED, true); + resp = (struct hclge_sfp_speed_cmd *)desc.data; + ret = hclge_cmd_send(&hdev->hw, &desc, 1); + if (ret == -EOPNOTSUPP) { + dev_warn(&hdev->pdev->dev, + "IMP does not support get SFP speed %d\n", ret); + return ret; + } else if (ret) { + dev_err(&hdev->pdev->dev, "get sfp speed failed %d\n", ret); + return ret; + } + + *speed = resp->sfp_speed; + + return 0; +} + static int hclge_update_speed_duplex(struct hclge_dev *hdev) { struct hclge_mac mac = hdev->hw.mac; - u8 duplex; int speed; int ret; - /* get the speed and duplex as autoneg'result from mac cmd when phy + /* get the speed from SFP cmd when phy * doesn't exist. */ - if (mac.phydev || !mac.autoneg) + if (mac.phydev) return 0; - ret = hclge_query_mac_an_speed_dup(hdev, &speed, &duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac autoneg/speed/duplex query failed %d\n", ret); - return ret; - } + /* if IMP does not support get SFP/qSFP speed, return directly */ + if (!hdev->support_sfp_query) + return 0; - ret = hclge_cfg_mac_speed_dup(hdev, speed, duplex); - if (ret) { - dev_err(&hdev->pdev->dev, - "mac speed/duplex config failed %d\n", ret); + ret = hclge_get_sfp_speed(hdev, &speed); + if (ret == -EOPNOTSUPP) { + hdev->support_sfp_query = false; + return ret; + } else if (ret) { return ret; } - return 0; + if (speed == HCLGE_MAC_SPEED_UNKNOWN) + return 0; /* do nothing if no SFP */ + + /* must config full duplex for SFP */ + return hclge_cfg_mac_speed_dup(hdev, speed, HCLGE_MAC_FULL); } static int hclge_update_speed_duplex_h(struct hnae3_handle *handle) @@ -2129,12 +2231,13 @@ static void hclge_service_complete(struct hclge_dev *hdev) static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) { - u32 rst_src_reg; - u32 cmdq_src_reg; + u32 rst_src_reg, cmdq_src_reg, msix_src_reg; /* fetch the events from their corresponding regs */ rst_src_reg = hclge_read_dev(&hdev->hw, HCLGE_MISC_VECTOR_INT_STS); cmdq_src_reg = hclge_read_dev(&hdev->hw, HCLGE_VECTOR0_CMDQ_SRC_REG); + msix_src_reg = hclge_read_dev(&hdev->hw, + HCLGE_VECTOR0_PF_OTHER_INT_STS_REG); /* Assumption: If by any chance reset and mailbox events are reported * together then we will only process reset event in this go and will @@ -2144,7 +2247,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) */ /* check for vector0 reset event sources */ + if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "IMP
reset interrupt\n"); + set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); + return HCLGE_VECTOR0_EVENT_RST; + } + if (BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "global reset interrupt\n"); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_GLOBAL_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_GLOBALRESET_INT_B); @@ -2152,17 +2264,16 @@ static u32 hclge_check_event_cause(struct hclge_dev *hdev, u32 *clearval) } if (BIT(HCLGE_VECTOR0_CORERESET_INT_B) & rst_src_reg) { + dev_info(&hdev->pdev->dev, "core reset interrupt\n"); set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); set_bit(HNAE3_CORE_RESET, &hdev->reset_pending); *clearval = BIT(HCLGE_VECTOR0_CORERESET_INT_B); return HCLGE_VECTOR0_EVENT_RST; } - if (BIT(HCLGE_VECTOR0_IMPRESET_INT_B) & rst_src_reg) { - set_bit(HNAE3_IMP_RESET, &hdev->reset_pending); - *clearval = BIT(HCLGE_VECTOR0_IMPRESET_INT_B); - return HCLGE_VECTOR0_EVENT_RST; - } + /* check for vector0 msix event source */ + if (msix_src_reg & HCLGE_VECTOR0_REG_MSIX_MASK) + return HCLGE_VECTOR0_EVENT_ERR; /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGE_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { @@ -2214,6 +2325,19 @@ static irqreturn_t hclge_misc_irq_handle(int irq, void *data) /* vector 0 interrupt is shared with reset and mailbox source events.*/ switch (event_cause) { + case HCLGE_VECTOR0_EVENT_ERR: + /* we do not know what type of reset is required now. This could + * only be decided after we fetch the type of errors which + * caused this event. Therefore, we will do the following for now: + * 1. Assert HNAE3_UNKNOWN_RESET type of reset. This means we + * have deferred the type of reset to be used. + * 2. Schedule the reset service task. + * 3. When the service task receives HNAE3_UNKNOWN_RESET type it + * will fetch the correct type of reset. This would be done + * by first decoding the types of errors.
+ */ + set_bit(HNAE3_UNKNOWN_RESET, &hdev->reset_request); + /* fall through */ case HCLGE_VECTOR0_EVENT_RST: hclge_reset_task_schedule(hdev); break; @@ -2308,21 +2432,56 @@ static int hclge_notify_client(struct hclge_dev *hdev, int ret; ret = client->ops->reset_notify(handle, type); - if (ret) + if (ret) { + dev_err(&hdev->pdev->dev, + "notify nic client failed %d(%d)\n", type, ret); return ret; + } } return 0; } +static int hclge_notify_roce_client(struct hclge_dev *hdev, + enum hnae3_reset_notify_type type) +{ + struct hnae3_client *client = hdev->roce_client; + int ret = 0; + u16 i; + + if (!client) + return 0; + + if (!client->ops->reset_notify) + return -EOPNOTSUPP; + + for (i = 0; i < hdev->num_vmdq_vport + 1; i++) { + struct hnae3_handle *handle = &hdev->vport[i].roce; + + ret = client->ops->reset_notify(handle, type); + if (ret) { + dev_err(&hdev->pdev->dev, + "notify roce client failed %d(%d)", + type, ret); + return ret; + } + } + + return ret; +} + static int hclge_reset_wait(struct hclge_dev *hdev) { #define HCLGE_RESET_WATI_MS 100 -#define HCLGE_RESET_WAIT_CNT 5 +#define HCLGE_RESET_WAIT_CNT 200 u32 val, reg, reg_bit; u32 cnt = 0; switch (hdev->reset_type) { + case HNAE3_IMP_RESET: + reg = HCLGE_GLOBAL_RESET_REG; + reg_bit = HCLGE_IMP_RESET_BIT; + break; case HNAE3_GLOBAL_RESET: reg = HCLGE_GLOBAL_RESET_REG; reg_bit = HCLGE_GLOBAL_RESET_BIT; @@ -2335,6 +2494,8 @@ static int hclge_reset_wait(struct hclge_dev *hdev) reg = HCLGE_FUN_RST_ING; reg_bit = HCLGE_FUN_RST_ING_B; break; + case HNAE3_FLR_RESET: + break; default: dev_err(&hdev->pdev->dev, "Wait for unsupported reset type: %d\n", @@ -2342,6 +2503,20 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return -EINVAL; } + if (hdev->reset_type == HNAE3_FLR_RESET) { + while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && + cnt++ < HCLGE_RESET_WAIT_CNT) + msleep(HCLGE_RESET_WATI_MS); + + if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { + dev_err(&hdev->pdev->dev, + "flr wait timeout: %d\n", cnt); + return -EBUSY; + } + + return 0; + } + val = hclge_read_dev(&hdev->hw, reg); while (hnae3_get_bit(val, reg_bit) && cnt < HCLGE_RESET_WAIT_CNT) { msleep(HCLGE_RESET_WATI_MS); @@ -2358,6 +2533,55 @@ static int hclge_reset_wait(struct hclge_dev *hdev) return 0; } +static int hclge_set_vf_rst(struct hclge_dev *hdev, int func_id, bool reset) +{ + struct hclge_vf_rst_cmd *req; + struct hclge_desc desc; + + req = (struct hclge_vf_rst_cmd *)desc.data; + hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GBL_RST_STATUS, false); + req->dest_vfid = func_id; + + if (reset) + req->vf_rst = 0x1; + + return hclge_cmd_send(&hdev->hw, &desc, 1); +} + +int hclge_set_all_vf_rst(struct hclge_dev *hdev, bool reset) +{ + int i; + + for (i = hdev->num_vmdq_vport + 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + int ret; + + /* Send cmd to set/clear VF's FUNC_RST_ING */ + ret = hclge_set_vf_rst(hdev, vport->vport_id, reset); + if (ret) { + dev_err(&hdev->pdev->dev, + "set vf(%d) rst failed %d!\n", + vport->vport_id, ret); + return ret; + } + + if (!reset) + continue; + + /* Inform VF to process the reset. + * hclge_inform_reset_assert_to_vf may fail if VF + * driver is not loaded. 
+ */ + ret = hclge_inform_reset_assert_to_vf(vport); + if (ret) + dev_warn(&hdev->pdev->dev, + "inform reset to vf(%d) failed %d!\n", + vport->vport_id, ret); + } + + return 0; +} + int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id) { struct hclge_desc desc; @@ -2396,11 +2620,16 @@ static void hclge_do_reset(struct hclge_dev *hdev) break; case HNAE3_FUNC_RESET: dev_info(&pdev->dev, "PF Reset requested\n"); - hclge_func_reset_cmd(hdev, 0); /* schedule again to check later */ set_bit(HNAE3_FUNC_RESET, &hdev->reset_pending); hclge_reset_task_schedule(hdev); break; + case HNAE3_FLR_RESET: + dev_info(&pdev->dev, "FLR requested\n"); + /* schedule again to check later */ + set_bit(HNAE3_FLR_RESET, &hdev->reset_pending); + hclge_reset_task_schedule(hdev); + break; default: dev_warn(&pdev->dev, "Unsupported reset type: %d\n", hdev->reset_type); @@ -2413,21 +2642,46 @@ static enum hnae3_reset_type hclge_get_reset_level(struct hclge_dev *hdev, { enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + /* first, resolve any unknown reset type to the known type(s) */ + if (test_bit(HNAE3_UNKNOWN_RESET, addr)) { + /* we will intentionally ignore any errors from this function + * as we will end up in *some* reset request in any case + */ + hclge_handle_hw_msix_error(hdev, addr); + clear_bit(HNAE3_UNKNOWN_RESET, addr); + /* We deferred the clearing of the error event which caused + * the interrupt, since it was not possible to do that in + * interrupt context (and this is the reason we introduced the + * new UNKNOWN reset type). Now that the errors have been + * handled and cleared in hardware, we can safely enable + * interrupts. This is an exception to the norm. + */ + hclge_enable_vector(&hdev->misc_vector, true); + } + /* return the highest priority reset level amongst all */ - if (test_bit(HNAE3_GLOBAL_RESET, addr)) + if (test_bit(HNAE3_IMP_RESET, addr)) { + rst_level = HNAE3_IMP_RESET; + clear_bit(HNAE3_IMP_RESET, addr); + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_GLOBAL_RESET, addr)) { rst_level = HNAE3_GLOBAL_RESET; - else if (test_bit(HNAE3_CORE_RESET, addr)) + clear_bit(HNAE3_GLOBAL_RESET, addr); + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_CORE_RESET, addr)) { rst_level = HNAE3_CORE_RESET; - else if (test_bit(HNAE3_IMP_RESET, addr)) - rst_level = HNAE3_IMP_RESET; - else if (test_bit(HNAE3_FUNC_RESET, addr)) + clear_bit(HNAE3_CORE_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FUNC_RESET, addr)) { rst_level = HNAE3_FUNC_RESET; - - /* now, clear all other resets */ - clear_bit(HNAE3_GLOBAL_RESET, addr); - clear_bit(HNAE3_CORE_RESET, addr); - clear_bit(HNAE3_IMP_RESET, addr); - clear_bit(HNAE3_FUNC_RESET, addr); + clear_bit(HNAE3_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } return rst_level; } @@ -2457,39 +2711,209 @@ static void hclge_clear_reset_cause(struct hclge_dev *hdev) hclge_enable_vector(&hdev->misc_vector, true); } +static int hclge_reset_prepare_down(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, true); + break; + default: + break; + } + + return ret; +} + +static int hclge_reset_prepare_wait(struct hclge_dev *hdev) +{ + u32 reg_val; + int ret = 0; + + switch (hdev->reset_type) { + case
HNAE3_FUNC_RESET: + /* There is no mechanism for PF to know if VF has stopped IO; + * for now, just wait 100 ms for VF to stop IO + */ + msleep(100); + ret = hclge_func_reset_cmd(hdev, 0); + if (ret) { + dev_err(&hdev->pdev->dev, + "asserting function reset failed %d!\n", ret); + return ret; + } + + /* After performing pf reset, it is not necessary to do the + * mailbox handling or send any command to firmware, because + * any mailbox handling or command to firmware is only valid + * after hclge_cmd_init is called. + */ + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + break; + case HNAE3_FLR_RESET: + /* There is no mechanism for PF to know if VF has stopped IO; + * for now, just wait 100 ms for VF to stop IO + */ + msleep(100); + set_bit(HCLGE_STATE_CMD_DISABLE, &hdev->state); + set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + break; + case HNAE3_IMP_RESET: + reg_val = hclge_read_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG); + hclge_write_dev(&hdev->hw, HCLGE_PF_OTHER_INT_REG, + BIT(HCLGE_VECTOR0_IMP_RESET_INT_B) | reg_val); + break; + default: + break; + } + + dev_info(&hdev->pdev->dev, "prepare wait ok\n"); + + return ret; +} + +static bool hclge_reset_err_handle(struct hclge_dev *hdev, bool is_timeout) +{ +#define MAX_RESET_FAIL_CNT 5 +#define RESET_UPGRADE_DELAY_SEC 10 + + if (hdev->reset_pending) { + dev_info(&hdev->pdev->dev, "Reset pending %lu\n", + hdev->reset_pending); + return true; + } else if ((hdev->reset_type != HNAE3_IMP_RESET) && + (hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) & + BIT(HCLGE_IMP_RESET_BIT))) { + dev_info(&hdev->pdev->dev, + "reset failed because IMP Reset is pending\n"); + hclge_clear_reset_cause(hdev); + return false; + } else if (hdev->reset_fail_cnt < MAX_RESET_FAIL_CNT) { + hdev->reset_fail_cnt++; + if (is_timeout) { + set_bit(hdev->reset_type, &hdev->reset_pending); + dev_info(&hdev->pdev->dev, + "re-schedule to wait for hw reset done\n"); + return true; + } + + dev_info(&hdev->pdev->dev, "Upgrade reset level\n"); + hclge_clear_reset_cause(hdev); + mod_timer(&hdev->reset_timer, + jiffies + RESET_UPGRADE_DELAY_SEC * HZ); + + return false; + } + + hclge_clear_reset_cause(hdev); + dev_err(&hdev->pdev->dev, "Reset fail!\n"); + return false; +} + +static int hclge_reset_prepare_up(struct hclge_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_FUNC_RESET: + /* fall through */ + case HNAE3_FLR_RESET: + ret = hclge_set_all_vf_rst(hdev, false); + break; + default: + break; + } + + return ret; +} + static void hclge_reset(struct hclge_dev *hdev) { struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); - struct hnae3_handle *handle; + bool is_timeout = false; + int ret; /* Initialize ae_dev reset status as well, in case enet layer wants to * know if device is undergoing reset */ ae_dev->reset_type = hdev->reset_type; + hdev->reset_count++; /* perform reset of the stack & ae device for a client */ - handle = &hdev->vport[0].nic; + ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset; + + ret = hclge_reset_prepare_down(hdev); + if (ret) + goto err_reset; + rtnl_lock(); - hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset_lock; + rtnl_unlock(); - if (!hclge_reset_wait(hdev)) { - rtnl_lock(); - hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); - hclge_reset_ae_dev(hdev->ae_dev); - hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + ret = hclge_reset_prepare_wait(hdev); + if (ret) + goto err_reset; - hclge_clear_reset_cause(hdev); - } else { -
rtnl_lock(); - /* schedule again to check pending resets later */ - set_bit(hdev->reset_type, &hdev->reset_pending); - hclge_reset_task_schedule(hdev); + if (hclge_reset_wait(hdev)) { + is_timeout = true; + goto err_reset; } - hclge_notify_client(hdev, HNAE3_UP_CLIENT); - handle->last_reset_time = jiffies; + ret = hclge_notify_roce_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + goto err_reset; + + rtnl_lock(); + ret = hclge_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + goto err_reset_lock; + + ret = hclge_reset_ae_dev(hdev->ae_dev); + if (ret) + goto err_reset_lock; + + ret = hclge_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + goto err_reset_lock; + + hclge_clear_reset_cause(hdev); + + ret = hclge_reset_prepare_up(hdev); + if (ret) + goto err_reset_lock; + + ret = hclge_notify_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset_lock; + + rtnl_unlock(); + + ret = hclge_notify_roce_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + goto err_reset; + + ret = hclge_notify_roce_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset; + + hdev->last_reset_time = jiffies; + hdev->reset_fail_cnt = 0; ae_dev->reset_type = HNAE3_NONE_RESET; + + return; + +err_reset_lock: + rtnl_unlock(); +err_reset: + if (hclge_reset_err_handle(hdev, is_timeout)) + hclge_reset_task_schedule(hdev); } static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) @@ -2515,20 +2939,42 @@ static void hclge_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) if (!handle) handle = &hdev->vport[0].nic; - if (time_before(jiffies, (handle->last_reset_time + 3 * HZ))) + if (time_before(jiffies, (hdev->last_reset_time + 3 * HZ))) return; - else if (time_after(jiffies, (handle->last_reset_time + 4 * 5 * HZ))) - handle->reset_level = HNAE3_FUNC_RESET; + else if (hdev->default_reset_request) + hdev->reset_level = + hclge_get_reset_level(hdev, + &hdev->default_reset_request); + else if (time_after(jiffies, (hdev->last_reset_time + 4 * 5 * HZ))) + hdev->reset_level = HNAE3_FUNC_RESET; dev_info(&hdev->pdev->dev, "received reset event, reset type is %d", - handle->reset_level); + hdev->reset_level); /* request reset & schedule reset task */ - set_bit(handle->reset_level, &hdev->reset_request); + set_bit(hdev->reset_level, &hdev->reset_request); hclge_reset_task_schedule(hdev); - if (handle->reset_level < HNAE3_GLOBAL_RESET) - handle->reset_level++; + if (hdev->reset_level < HNAE3_GLOBAL_RESET) + hdev->reset_level++; +} + +static void hclge_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclge_reset_timer(struct timer_list *t) +{ + struct hclge_dev *hdev = from_timer(hdev, t, reset_timer); + + dev_info(&hdev->pdev->dev, + "triggering global reset in reset timer\n"); + set_bit(HNAE3_GLOBAL_RESET, &hdev->default_reset_request); + hclge_reset_event(hdev->pdev, NULL); } static void hclge_reset_subtask(struct hclge_dev *hdev) @@ -2542,6 +2988,7 @@ static void hclge_reset_subtask(struct hclge_dev *hdev) * b. else, we can come back later to check this status so re-sched * now.
*/ + hdev->last_reset_time = jiffies; hdev->reset_type = hclge_get_reset_level(hdev, &hdev->reset_pending); if (hdev->reset_type != HNAE3_NONE_RESET) hclge_reset(hdev); @@ -2584,6 +3031,23 @@ static void hclge_mailbox_service_task(struct work_struct *work) clear_bit(HCLGE_STATE_MBX_HANDLING, &hdev->state); } +static void hclge_update_vport_alive(struct hclge_dev *hdev) +{ + int i; + + /* start from vport 1 for PF is always alive */ + for (i = 1; i < hdev->num_alloc_vport; i++) { + struct hclge_vport *vport = &hdev->vport[i]; + + if (time_after(jiffies, vport->last_active_jiffies + 8 * HZ)) + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + + /* If vf is not alive, set to default value */ + if (!test_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state)) + vport->mps = HCLGE_MAC_DEFAULT_FRAME; + } +} + static void hclge_service_task(struct work_struct *work) { struct hclge_dev *hdev = @@ -2596,6 +3060,7 @@ static void hclge_service_task(struct work_struct *work) hclge_update_speed_duplex(hdev); hclge_update_link_status(hdev); + hclge_update_vport_alive(hdev); hclge_service_complete(hdev); } @@ -4212,6 +4677,13 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, u8 vf = ethtool_get_flow_spec_ring_vf(fs->ring_cookie); u16 tqps; + if (vf > hdev->num_req_vfs) { + dev_err(&hdev->pdev->dev, + "Error: vf id (%d) > max vf num (%d)\n", + vf, hdev->num_req_vfs); + return -EINVAL; + } + dst_vport_id = vf ? hdev->vport[vf].vport_id : vport->vport_id; tqps = vf ? hdev->vport[vf].alloc_tqps : vport->alloc_tqps; @@ -4222,13 +4694,6 @@ static int hclge_add_fd_entry(struct hnae3_handle *handle, return -EINVAL; } - if (vf > hdev->num_req_vfs) { - dev_err(&hdev->pdev->dev, - "Error: vf id (%d) > max vf num (%d)\n", - vf, hdev->num_req_vfs); - return -EINVAL; - } - action = HCLGE_FD_ACTION_ACCEPT_PACKET; q_index = ring; } @@ -4336,8 +4801,16 @@ static int hclge_restore_fd_entries(struct hnae3_handle *handle) struct hlist_node *node; int ret; + /* Return ok here, because reset error handling will check this + * return value. If error is returned here, the reset process will + * fail. 
+ */ + if (!hnae3_dev_fd_supported(hdev)) - return -EOPNOTSUPP; + return 0; + + /* if fd is disabled, it should not be restored during reset */ + if (!hdev->fd_cfg.fd_en) + return 0; hlist_for_each_entry_safe(rule, node, &hdev->fd_rule_list, rule_node) { ret = hclge_config_action(hdev, HCLGE_FD_STAGE_1, rule); @@ -4592,6 +5065,31 @@ static int hclge_get_all_rules(struct hnae3_handle *handle, return 0; } +static bool hclge_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_read_dev(&hdev->hw, HCLGE_GLOBAL_RESET_REG) || + hclge_read_dev(&hdev->hw, HCLGE_FUN_RST_ING); +} + +static bool hclge_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclge_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hdev->reset_count; +} + static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); @@ -4801,19 +5299,28 @@ static void hclge_reset_tqp_stats(struct hnae3_handle *handle) } } -static int hclge_ae_start(struct hnae3_handle *handle) +static void hclge_set_timer_task(struct hnae3_handle *handle, bool enable) { struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; - int i; - for (i = 0; i < vport->alloc_tqps; i++) - hclge_tqp_enable(hdev, i, 0, true); + if (enable) { + mod_timer(&hdev->service_timer, jiffies + HZ); + } else { + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); + } +} + +static int hclge_ae_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; /* mac enable */ hclge_cfg_mac_mode(hdev, true); clear_bit(HCLGE_STATE_DOWN, &hdev->state); - mod_timer(&hdev->service_timer, jiffies + HZ); hdev->hw.mac.link = 0; /* reset tqp stats */ @@ -4832,17 +5339,17 @@ static void hclge_ae_stop(struct hnae3_handle *handle) set_bit(HCLGE_STATE_DOWN, &hdev->state); - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); - clear_bit(HCLGE_STATE_SERVICE_SCHED, &hdev->state); - - if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state)) { + /* If it is not PF reset, the firmware will disable the MAC, + * so it only needs to stop the phy here.
+ */ + if (test_bit(HCLGE_STATE_RST_HANDLING, &hdev->state) && + hdev->reset_type != HNAE3_FUNC_RESET) { hclge_mac_stop_phy(hdev); return; } - for (i = 0; i < vport->alloc_tqps; i++) - hclge_tqp_enable(hdev, i, 0, false); + for (i = 0; i < handle->kinfo.num_tqps; i++) + hclge_reset_tqp(handle, i); /* Mac disable */ hclge_cfg_mac_mode(hdev, false); @@ -4851,11 +5358,35 @@ static void hclge_ae_stop(struct hnae3_handle *handle) /* reset tqp stats */ hclge_reset_tqp_stats(handle); - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); hclge_update_link_status(hdev); } +int hclge_vport_start(struct hclge_vport *vport) +{ + set_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); + vport->last_active_jiffies = jiffies; + return 0; +} + +void hclge_vport_stop(struct hclge_vport *vport) +{ + clear_bit(HCLGE_VPORT_STATE_ALIVE, &vport->state); +} + +static int hclge_client_start(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_vport_start(vport); +} + +static void hclge_client_stop(struct hnae3_handle *handle) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + + hclge_vport_stop(vport); +} + static int hclge_get_mac_vlan_cmd_status(struct hclge_vport *vport, u16 cmdq_resp, u8 resp_code, enum hclge_mac_vlan_tbl_opcode op) @@ -6003,54 +6534,76 @@ int hclge_en_hw_strip_rxvtag(struct hnae3_handle *handle, bool enable) return hclge_set_vlan_rx_offload_cfg(vport); } -static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mtu) +static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps) { struct hclge_config_max_frm_size_cmd *req; struct hclge_desc desc; - int max_frm_size; - int ret; - - max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; - - if (max_frm_size < HCLGE_MAC_MIN_FRAME || - max_frm_size > HCLGE_MAC_MAX_FRAME) - return -EINVAL; - - max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_CONFIG_MAX_FRM_SIZE, false); req = (struct hclge_config_max_frm_size_cmd *)desc.data; - req->max_frm_size = cpu_to_le16(max_frm_size); + req->max_frm_size = cpu_to_le16(new_mps); req->min_frm_size = HCLGE_MAC_MIN_FRAME; - ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) - dev_err(&hdev->pdev->dev, "set mtu fail, ret =%d.\n", ret); - else - hdev->mps = max_frm_size; - - return ret; + return hclge_cmd_send(&hdev->hw, &desc, 1); } static int hclge_set_mtu(struct hnae3_handle *handle, int new_mtu) { struct hclge_vport *vport = hclge_get_vport(handle); + + return hclge_set_vport_mtu(vport, new_mtu); +} + +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu) +{ struct hclge_dev *hdev = vport->back; - int ret; + int i, max_frm_size, ret = 0; + + max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + if (max_frm_size < HCLGE_MAC_MIN_FRAME || + max_frm_size > HCLGE_MAC_MAX_FRAME) + return -EINVAL; + + max_frm_size = max(max_frm_size, HCLGE_MAC_DEFAULT_FRAME); + mutex_lock(&hdev->vport_lock); + /* VF's mps must fit within hdev->mps */ + if (vport->vport_id && max_frm_size > hdev->mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } else if (vport->vport_id) { + vport->mps = max_frm_size; + mutex_unlock(&hdev->vport_lock); + return 0; + } + + /* PF's mps must be greater then VF's mps */ + for (i = 1; i < hdev->num_alloc_vport; i++) + if (max_frm_size < hdev->vport[i].mps) { + mutex_unlock(&hdev->vport_lock); + return -EINVAL; + } + + hclge_notify_client(hdev, HNAE3_DOWN_CLIENT); - ret = hclge_set_mac_mtu(hdev, new_mtu); + ret = 
hclge_set_mac_mtu(hdev, max_frm_size); if (ret) { dev_err(&hdev->pdev->dev, "Change mtu fail, ret =%d\n", ret); - return ret; + goto out; } + hdev->mps = max_frm_size; + vport->mps = max_frm_size; + ret = hclge_buffer_alloc(hdev); if (ret) dev_err(&hdev->pdev->dev, "Allocate buffer fail, ret =%d\n", ret); +out: + hclge_notify_client(hdev, HNAE3_UP_CLIENT); + mutex_unlock(&hdev->vport_lock); return ret; } @@ -6098,8 +6651,7 @@ static int hclge_get_reset_status(struct hclge_dev *hdev, u16 queue_id) return hnae3_get_bit(req->ready_to_reset, HCLGE_TQP_RESET_B); } -static u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, - u16 queue_id) +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id) { struct hnae3_queue *queue; struct hclge_tqp *tqp; @@ -6250,7 +6802,7 @@ int hclge_cfg_flowctrl(struct hclge_dev *hdev) if (!phydev->link || !phydev->autoneg) return 0; - local_advertising = ethtool_adv_to_lcl_adv_t(phydev->advertising); + local_advertising = linkmode_adv_to_lcl_adv_t(phydev->advertising); if (phydev->pause) remote_advertising = LPA_PAUSE_CAP; @@ -6612,6 +7164,8 @@ static void hclge_state_uninit(struct hclge_dev *hdev) if (hdev->service_timer.function) del_timer_sync(&hdev->service_timer); + if (hdev->reset_timer.function) + del_timer_sync(&hdev->reset_timer); if (hdev->service_task.func) cancel_work_sync(&hdev->service_task); if (hdev->rst_service_task.func) @@ -6620,6 +7174,34 @@ static void hclge_state_uninit(struct hclge_dev *hdev) cancel_work_sync(&hdev->mbx_service_task); } +static void hclge_flr_prepare(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGE_FLR_WAIT_MS 100 +#define HCLGE_FLR_WAIT_CNT 50 + struct hclge_dev *hdev = ae_dev->priv; + int cnt = 0; + + clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); + set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); + hclge_reset_event(hdev->pdev, NULL); + + while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && + cnt++ < HCLGE_FLR_WAIT_CNT) + msleep(HCLGE_FLR_WAIT_MS); + + if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) + dev_err(&hdev->pdev->dev, + "flr wait down timeout: %d\n", cnt); +} + +static void hclge_flr_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclge_dev *hdev = ae_dev->priv; + + set_bit(HNAE3_FLR_DONE, &hdev->flr_state); +} + static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; @@ -6635,7 +7217,11 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hdev->pdev = pdev; hdev->ae_dev = ae_dev; hdev->reset_type = HNAE3_NONE_RESET; + hdev->reset_level = HNAE3_FUNC_RESET; ae_dev->priv = hdev; + hdev->mps = ETH_FRAME_LEN + ETH_FCS_LEN + 2 * VLAN_HLEN; + + mutex_init(&hdev->vport_lock); ret = hclge_pci_init(hdev); if (ret) { @@ -6727,6 +7313,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) goto err_mdiobus_unreg; } + ret = hclge_config_gro(hdev, true); + if (ret) + goto err_mdiobus_unreg; + ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -6762,13 +7352,14 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) ret = hclge_hw_error_set_state(hdev, true); if (ret) { dev_err(&pdev->dev, - "hw error interrupts enable failed, ret =%d\n", ret); + "fail(%d) to enable hw error interrupts\n", ret); goto err_mdiobus_unreg; } hclge_dcb_ops_set(hdev); timer_setup(&hdev->service_timer, hclge_service_timer, 0); + timer_setup(&hdev->reset_timer, hclge_reset_timer, 0); INIT_WORK(&hdev->service_task, hclge_service_task); 
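The MTU path above is, at its core, a small amount of arithmetic: the wire frame must hold the MTU plus the Ethernet header, the FCS and two VLAN tags, it must fall inside the MAC limits, and it is floored at the default frame size. The following standalone C sketch models that mapping; the constants mirror the driver's values, but the helper name and the simplified checks are illustrative only, not driver code.

/* Userspace model of the MTU -> frame-size mapping used by
 * hclge_set_vport_mtu() above. Constants mirror the driver;
 * mtu_to_frame_size() is a made-up name for illustration.
 */
#include <stdio.h>

#define ETH_HLEN          14
#define ETH_FCS_LEN       4
#define VLAN_HLEN         4
#define MAC_MIN_FRAME     64
#define MAC_MAX_FRAME     9728
#define MAC_DEFAULT_FRAME (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + 1500)

/* returns the frame size to program, or -1 on an invalid MTU */
static int mtu_to_frame_size(int new_mtu)
{
	int max_frm_size = new_mtu + ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN;

	if (max_frm_size < MAC_MIN_FRAME || max_frm_size > MAC_MAX_FRAME)
		return -1;

	/* never program less than the default frame size */
	if (max_frm_size < MAC_DEFAULT_FRAME)
		max_frm_size = MAC_DEFAULT_FRAME;
	return max_frm_size;
}

int main(void)
{
	/* 1500 -> 1526; small MTUs still program the 1526 default;
	 * 9702 is the largest MTU that fits in a 9728-byte frame. */
	int mtus[] = { 1500, 68, 9702, 9703 };

	for (int i = 0; i < 4; i++)
		printf("mtu %d -> frame %d\n", mtus[i],
		       mtu_to_frame_size(mtus[i]));
	return 0;
}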
INIT_WORK(&hdev->rst_service_task, hclge_reset_service_task); INIT_WORK(&hdev->mbx_service_task, hclge_mailbox_service_task); @@ -6779,6 +7370,7 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_enable_vector(&hdev->misc_vector, true); hclge_state_init(hdev); + hdev->last_reset_time = jiffies; pr_info("%s driver initialization finished.\n", HCLGE_DRIVER_NAME); return 0; @@ -6806,6 +7398,17 @@ static void hclge_stats_clear(struct hclge_dev *hdev) memset(&hdev->hw_stats, 0, sizeof(hdev->hw_stats)); } +static void hclge_reset_vport_state(struct hclge_dev *hdev) +{ + struct hclge_vport *vport = hdev->vport; + int i; + + for (i = 0; i < hdev->num_alloc_vport; i++) { + hclge_vport_start(vport); + vport++; + } +} + static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) { struct hclge_dev *hdev = ae_dev->priv; @@ -6823,19 +7426,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - ret = hclge_get_cap(hdev); - if (ret) { - dev_err(&pdev->dev, "get hw capability error, ret = %d.\n", - ret); - return ret; - } - - ret = hclge_configure(hdev); - if (ret) { - dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret); - return ret; - } - ret = hclge_map_tqp(hdev); if (ret) { dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret); @@ -6856,6 +7446,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } + ret = hclge_config_gro(hdev, true); + if (ret) + return ret; + ret = hclge_init_vlan_config(hdev); if (ret) { dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret); @@ -6881,11 +7475,17 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev) return ret; } - /* Re-enable the TM hw error interrupts because - * they get disabled on core/global reset. + /* Re-enable the hw error interrupts because + * the interrupts get disabled on core/global reset. 
*/ - if (hclge_enable_tm_hw_error(hdev, true)) - dev_err(&pdev->dev, "failed to enable TM hw error interrupts\n"); + ret = hclge_hw_error_set_state(hdev, true); + if (ret) { + dev_err(&pdev->dev, + "fail(%d) to re-enable HNS hw error interrupts\n", ret); + return ret; + } + + hclge_reset_vport_state(hdev); dev_info(&pdev->dev, "Reset done, %s driver initialization finished.\n", HCLGE_DRIVER_NAME); @@ -6913,6 +7513,7 @@ static void hclge_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) hclge_destroy_cmd_queue(&hdev->hw); hclge_misc_irq_uninit(hdev); hclge_pci_uninit(hdev); + mutex_destroy(&hdev->vport_lock); ae_dev->priv = NULL; } @@ -7166,8 +7767,15 @@ static int hclge_get_64_bit_regs(struct hclge_dev *hdev, u32 regs_num, return 0; } +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFFFFFFFF +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) + static int hclge_get_regs_len(struct hnae3_handle *handle) { + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 regs_num_32_bit, regs_num_64_bit; @@ -7180,15 +7788,25 @@ static int hclge_get_regs_len(struct hnae3_handle *handle) return -EOPNOTSUPP; } - return regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + + return (cmdq_lines + common_lines + ring_lines * kinfo->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE + + regs_num_32_bit * sizeof(u32) + regs_num_64_bit * sizeof(u64); } static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, void *data) { + struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclge_vport *vport = hclge_get_vport(handle); struct hclge_dev *hdev = vport->back; u32 regs_num_32_bit, regs_num_64_bit; + int i, j, reg_um, separator_num; + u32 *reg = data; int ret; *version = hdev->fw_version; @@ -7200,16 +7818,53 @@ static void hclge_get_regs(struct hnae3_handle *handle, u32 *version, return; } - ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, data); + /* fetching per-PF registers values from PF PCIe register space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < kinfo->num_tqps; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclge_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + 0x200 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_um; i++) +
*reg++ = hclge_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + 4 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + /* fetching PF common registers values from firmware */ + ret = hclge_get_32_bit_regs(hdev, regs_num_32_bit, reg); if (ret) { dev_err(&hdev->pdev->dev, "Get 32 bit register failed, ret = %d.\n", ret); return; } - data = (u32 *)data + regs_num_32_bit; - ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, - data); + reg += regs_num_32_bit; + ret = hclge_get_64_bit_regs(hdev, regs_num_64_bit, reg); if (ret) dev_err(&hdev->pdev->dev, "Get 64 bit register failed, ret = %d.\n", ret); @@ -7272,9 +7927,19 @@ static void hclge_get_link_mode(struct hnae3_handle *handle, } } +static int hclge_gro_en(struct hnae3_handle *handle, int enable) +{ + struct hclge_vport *vport = hclge_get_vport(handle); + struct hclge_dev *hdev = vport->back; + + return hclge_config_gro(hdev, enable); +} + static const struct hnae3_ae_ops hclge_ops = { .init_ae_dev = hclge_init_ae_dev, .uninit_ae_dev = hclge_uninit_ae_dev, + .flr_prepare = hclge_flr_prepare, + .flr_done = hclge_flr_done, .init_client_instance = hclge_init_client_instance, .uninit_client_instance = hclge_uninit_client_instance, .map_ring_to_vector = hclge_map_ring_to_vector, @@ -7285,6 +7950,8 @@ static const struct hnae3_ae_ops hclge_ops = { .set_loopback = hclge_set_loopback, .start = hclge_ae_start, .stop = hclge_ae_stop, + .client_start = hclge_client_start, + .client_stop = hclge_client_stop, .get_status = hclge_get_status, .get_ksettings_an_result = hclge_get_ksettings_an_result, .update_speed_duplex_h = hclge_update_speed_duplex_h, @@ -7321,6 +7988,7 @@ static const struct hnae3_ae_ops hclge_ops = { .set_vf_vlan_filter = hclge_set_vf_vlan_filter, .enable_hw_strip_rxvtag = hclge_en_hw_strip_rxvtag, .reset_event = hclge_reset_event, + .set_default_reset_request = hclge_set_def_reset_request, .get_tqps_and_rss_info = hclge_get_tqps_and_rss_info, .set_channels = hclge_set_channels, .get_channels = hclge_get_channels, @@ -7336,7 +8004,14 @@ static const struct hnae3_ae_ops hclge_ops = { .get_fd_all_rules = hclge_get_all_rules, .restore_fd_rules = hclge_restore_fd_entries, .enable_fd = hclge_enable_fd, - .process_hw_error = hclge_process_ras_hw_error, + .dbg_run_cmd = hclge_dbg_run_cmd, + .handle_hw_ras_error = hclge_handle_hw_ras_error, + .get_hw_reset_stat = hclge_get_hw_reset_stat, + .ae_dev_resetting = hclge_ae_dev_resetting, + .ae_dev_reset_cnt = hclge_ae_dev_reset_cnt, + .set_gro_en = hclge_gro_en, + .get_global_queue_id = hclge_covert_handle_qid_global, + .set_timer_task = hclge_set_timer_task, }; static struct hnae3_ae_algo ae_algo = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h index 0d9215404269..6615b85a1c52 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.h @@ -28,6 +28,62 @@ #define HCLGE_VECTOR_REG_OFFSET 0x4 #define HCLGE_VECTOR_VF_OFFSET 0x100000 +#define HCLGE_CMDQ_TX_ADDR_L_REG 0x27000 +#define HCLGE_CMDQ_TX_ADDR_H_REG 0x27004 +#define HCLGE_CMDQ_TX_DEPTH_REG 0x27008 +#define HCLGE_CMDQ_TX_TAIL_REG 0x27010 +#define HCLGE_CMDQ_TX_HEAD_REG 0x27014 +#define HCLGE_CMDQ_RX_ADDR_L_REG 0x27018 +#define HCLGE_CMDQ_RX_ADDR_H_REG 0x2701C +#define HCLGE_CMDQ_RX_DEPTH_REG 0x27020 +#define HCLGE_CMDQ_RX_TAIL_REG 0x27024 +#define HCLGE_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGE_CMDQ_INTR_SRC_REG 0x27100 +#define HCLGE_CMDQ_INTR_STS_REG 0x27104 
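The register-dump hunks above size and fill the ethtool dump in 4-word lines, padding each register group with 0xFFFFFFFF separator words so that every group ends on a line boundary and contains at least one separator, which is exactly what the "+ 1" line in hclge_get_regs_len() budgets for. A minimal userspace model of that padding rule; read_reg() and the register addresses are stand-ins, not driver code.

/* Userspace model of the register-dump padding used above. */
#include <stdio.h>
#include <stdint.h>

#define REG_NUM_PER_LINE 4
#define MAX_SEPARATE_NUM 4
#define SEPARATOR_VALUE  0xFFFFFFFFu

/* stand-in for hclge_read_dev(): just echo the address */
static uint32_t read_reg(uint32_t addr) { return addr; }

static uint32_t *dump_group(uint32_t *out, const uint32_t *addrs, int n)
{
	int sep = MAX_SEPARATE_NUM - n % REG_NUM_PER_LINE;
	int i;

	for (i = 0; i < n; i++)
		*out++ = read_reg(addrs[i]);
	for (i = 0; i < sep; i++)	/* always 1..4 separators, never 0 */
		*out++ = SEPARATOR_VALUE;
	return out;
}

int main(void)
{
	const uint32_t cmdq[] = { 0x27000, 0x27004, 0x27008, 0x27010,
				  0x27014, 0x27018 };	/* 6 regs */
	int n = sizeof(cmdq) / sizeof(cmdq[0]);
	/* lines = bytes / 16 + 1, exactly as hclge_get_regs_len() sizes it */
	int lines = (int)sizeof(cmdq) / (REG_NUM_PER_LINE * 4) + 1;
	uint32_t buf[lines * REG_NUM_PER_LINE];
	uint32_t *end = dump_group(buf, cmdq, n);

	/* 6 registers + 2 separators fill exactly the 2 budgeted lines */
	for (int i = 0; i < end - buf; i++)
		printf("%08x%c", buf[i], (i % 4 == 3) ? '\n' : ' ');
	return 0;
}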
+#define HCLGE_CMDQ_INTR_EN_REG 0x27108 +#define HCLGE_CMDQ_INTR_GEN_REG 0x2710C + +/* bar registers for common func */ +#define HCLGE_VECTOR0_OTER_EN_REG 0x20600 +#define HCLGE_RAS_OTHER_STS_REG 0x20B00 +#define HCLGE_FUNC_RESET_STS_REG 0x20C00 +#define HCLGE_GRO_EN_REG 0x28000 + +/* bar registers for rcb */ +#define HCLGE_RING_RX_ADDR_L_REG 0x80000 +#define HCLGE_RING_RX_ADDR_H_REG 0x80004 +#define HCLGE_RING_RX_BD_NUM_REG 0x80008 +#define HCLGE_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGE_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGE_RING_RX_TAIL_REG 0x80018 +#define HCLGE_RING_RX_HEAD_REG 0x8001C +#define HCLGE_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGE_RING_RX_OFFSET_REG 0x80024 +#define HCLGE_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGE_RING_RX_STASH_REG 0x80030 +#define HCLGE_RING_RX_BD_ERR_REG 0x80034 +#define HCLGE_RING_TX_ADDR_L_REG 0x80040 +#define HCLGE_RING_TX_ADDR_H_REG 0x80044 +#define HCLGE_RING_TX_BD_NUM_REG 0x80048 +#define HCLGE_RING_TX_PRIORITY_REG 0x8004C +#define HCLGE_RING_TX_TC_REG 0x80050 +#define HCLGE_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGE_RING_TX_TAIL_REG 0x80058 +#define HCLGE_RING_TX_HEAD_REG 0x8005C +#define HCLGE_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGE_RING_TX_OFFSET_REG 0x80064 +#define HCLGE_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGE_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGE_RING_TX_BD_ERR_REG 0x80074 +#define HCLGE_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGE_TQP_INTR_CTRL_REG 0x20000 +#define HCLGE_TQP_INTR_GL0_REG 0x20100 +#define HCLGE_TQP_INTR_GL1_REG 0x20200 +#define HCLGE_TQP_INTR_GL2_REG 0x20300 +#define HCLGE_TQP_INTR_RL_REG 0x20900 + #define HCLGE_RSS_IND_TBL_SIZE 512 #define HCLGE_RSS_SET_BITMAP_MSK GENMASK(15, 0) #define HCLGE_RSS_KEY_SIZE 40 @@ -97,11 +153,13 @@ enum HLCGE_PORT_TYPE { #define HCLGE_NETWORK_PORT_ID_M GENMASK(3, 0) /* Reset related Registers */ +#define HCLGE_PF_OTHER_INT_REG 0x20600 #define HCLGE_MISC_RESET_STS_REG 0x20700 #define HCLGE_MISC_VECTOR_INT_STS 0x20800 #define HCLGE_GLOBAL_RESET_REG 0x20A00 #define HCLGE_GLOBAL_RESET_BIT 0 #define HCLGE_CORE_RESET_BIT 1 +#define HCLGE_IMP_RESET_BIT 2 #define HCLGE_FUN_RST_ING 0x20C00 #define HCLGE_FUN_RST_ING_B 0 @@ -115,8 +173,10 @@ enum HLCGE_PORT_TYPE { /* CMDQ register bits for RX event(=MBX event) */ #define HCLGE_VECTOR0_RX_CMDQ_INT_B 1 +#define HCLGE_VECTOR0_IMP_RESET_INT_B 1 + #define HCLGE_MAC_DEFAULT_FRAME \ - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN + ETH_DATA_LEN) + (ETH_HLEN + ETH_FCS_LEN + 2 * VLAN_HLEN + ETH_DATA_LEN) #define HCLGE_MAC_MIN_FRAME 64 #define HCLGE_MAC_MAX_FRAME 9728 @@ -145,12 +205,14 @@ enum HCLGE_DEV_STATE { enum hclge_evt_cause { HCLGE_VECTOR0_EVENT_RST, HCLGE_VECTOR0_EVENT_MBX, + HCLGE_VECTOR0_EVENT_ERR, HCLGE_VECTOR0_EVENT_OTHER, }; #define HCLGE_MPF_ENBALE 1 enum HCLGE_MAC_SPEED { + HCLGE_MAC_SPEED_UNKNOWN = 0, /* unknown */ HCLGE_MAC_SPEED_10M = 10, /* 10 Mbps */ HCLGE_MAC_SPEED_100M = 100, /* 100 Mbps */ HCLGE_MAC_SPEED_1G = 1000, /* 1000 Mbps = 1 Gbps */ @@ -593,10 +655,16 @@ struct hclge_dev { struct hclge_misc_vector misc_vector; struct hclge_hw_stats hw_stats; unsigned long state; + unsigned long flr_state; + unsigned long last_reset_time; enum hnae3_reset_type reset_type; + enum hnae3_reset_type reset_level; + unsigned long default_reset_request; unsigned long reset_request; /* reset has been requested */ unsigned long reset_pending; /* client rst is pending to be served */ + unsigned long reset_count; /* the number of resets that have been done */ + u32 reset_fail_cnt; u32 fw_version; u16 num_vmdq_vport; /*
Num vmdq vport this PF has set up */ u16 num_tqps; /* Num task queue pairs of this PF */ @@ -614,6 +682,7 @@ struct hclge_dev { u8 hw_tc_map; u8 tc_num_last_time; enum hclge_fc_mode fc_mode_last_time; + u8 support_sfp_query; #define HCLGE_FLAG_TC_BASE_SCH_MODE 1 #define HCLGE_FLAG_VNET_BASE_SCH_MODE 2 @@ -644,6 +713,7 @@ struct hclge_dev { unsigned long service_timer_period; unsigned long service_timer_previous; struct timer_list service_timer; + struct timer_list reset_timer; struct work_struct service_task; struct work_struct rst_service_task; struct work_struct mbx_service_task; @@ -666,7 +736,12 @@ struct hclge_dev { u32 flag; u32 pkt_buf_size; /* Total pf buf size for tx/rx */ + u32 tx_buf_size; /* Tx buffer size for each TC */ + u32 dv_buf_size; /* Dv buffer size for each TC */ + u32 mps; /* Max packet size */ + /* vport_lock protects resources shared by vports */ + struct mutex vport_lock; struct hclge_vlan_type_cfg vlan_type_cfg; @@ -717,6 +792,11 @@ struct hclge_rss_tuple_cfg { u8 ipv6_fragment_en; }; +enum HCLGE_VPORT_STATE { + HCLGE_VPORT_STATE_ALIVE, + HCLGE_VPORT_STATE_MAX +}; + struct hclge_vport { u16 alloc_tqps; /* Allocated Tx/Rx queues */ @@ -742,6 +822,10 @@ struct hclge_vport { struct hclge_dev *back; /* Back reference to associated dev */ struct hnae3_handle nic; struct hnae3_handle roce; + + unsigned long state; + unsigned long last_active_jiffies; + u32 mps; /* Max packet size */ }; void hclge_promisc_param_init(struct hclge_promisc_param *param, bool en_uc, @@ -768,6 +852,12 @@ static inline int hclge_get_queue_id(struct hnae3_queue *queue) return tqp->index; } +static inline bool hclge_is_reset_pending(struct hclge_dev *hdev) +{ + return !!hdev->reset_pending; +} + +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); int hclge_cfg_mac_speed_dup(struct hclge_dev *hdev, int speed, u8 duplex); int hclge_set_vlan_filter(struct hnae3_handle *handle, __be16 proto, u16 vlan_id, bool is_kill); @@ -777,9 +867,15 @@ int hclge_buffer_alloc(struct hclge_dev *hdev); int hclge_rss_init_hw(struct hclge_dev *hdev); void hclge_rss_indir_init_cfg(struct hclge_dev *hdev); +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport); void hclge_mbx_handler(struct hclge_dev *hdev); int hclge_reset_tqp(struct hnae3_handle *handle, u16 queue_id); void hclge_reset_vf_queue(struct hclge_vport *vport, u16 queue_id); int hclge_cfg_flowctrl(struct hclge_dev *hdev); int hclge_func_reset_cmd(struct hclge_dev *hdev, int func_id); +int hclge_vport_start(struct hclge_vport *vport); +void hclge_vport_stop(struct hclge_vport *vport); +int hclge_set_vport_mtu(struct hclge_vport *vport, int new_mtu); +int hclge_dbg_run_cmd(struct hnae3_handle *handle, char *cmd_buf); +u16 hclge_covert_handle_qid_global(struct hnae3_handle *handle, u16 queue_id); #endif diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index f890022938d9..a1de451a85df 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -79,15 +79,26 @@ static int hclge_send_mbx_msg(struct hclge_vport *vport, u8 *msg, u16 msg_len, return status; } -static int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) +int hclge_inform_reset_assert_to_vf(struct hclge_vport *vport) { + struct hclge_dev *hdev = vport->back; + enum hnae3_reset_type reset_type; u8 msg_data[2]; u8 dest_vfid; dest_vfid = (u8)vport->vport_id; + if (hdev->reset_type == HNAE3_FUNC_RESET) + reset_type =
HNAE3_VF_PF_FUNC_RESET; + else if (hdev->reset_type == HNAE3_FLR_RESET) + reset_type = HNAE3_VF_FULL_RESET; + else + return -EINVAL; + + memcpy(&msg_data[0], &reset_type, sizeof(u16)); + /* send this requested info to VF */ - return hclge_send_mbx_msg(vport, msg_data, sizeof(u8), + return hclge_send_mbx_msg(vport, msg_data, sizeof(msg_data), HCLGE_MBX_ASSERTING_RESET, dest_vfid); } @@ -290,6 +301,21 @@ static int hclge_set_vf_vlan_cfg(struct hclge_vport *vport, return status; } +static int hclge_set_vf_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req, + bool gen_resp) +{ + bool alive = !!mbx_req->msg[2]; + int ret = 0; + + if (alive) + ret = hclge_vport_start(vport); + else + hclge_vport_stop(vport); + + return ret; +} + static int hclge_get_vf_tcinfo(struct hclge_vport *vport, struct hclge_mbx_vf_to_pf_cmd *mbx_req, bool gen_resp) @@ -363,24 +389,41 @@ static void hclge_reset_vf(struct hclge_vport *vport, int ret; dev_warn(&hdev->pdev->dev, "PF received VF reset request from VF %d!", - mbx_req->mbx_src_vfid); - - /* Acknowledge VF that PF is now about to assert the reset for the VF. - * On receiving this message VF will get into pending state and will - * start polling for the hardware reset completion status. - */ - ret = hclge_inform_reset_assert_to_vf(vport); - if (ret) { - dev_err(&hdev->pdev->dev, - "PF fail(%d) to inform VF(%d)of reset, reset failed!\n", - ret, vport->vport_id); - return; - } + vport->vport_id); - dev_warn(&hdev->pdev->dev, "PF is now resetting VF %d.\n", - mbx_req->mbx_src_vfid); - /* reset this virtual function */ - hclge_func_reset_cmd(hdev, mbx_req->mbx_src_vfid); + ret = hclge_func_reset_cmd(hdev, vport->vport_id); + hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0); +} + +static void hclge_vf_keep_alive(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + vport->last_active_jiffies = jiffies; +} + +static int hclge_set_vf_mtu(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + int ret; + u32 mtu; + + memcpy(&mtu, &mbx_req->msg[2], sizeof(mtu)); + ret = hclge_set_vport_mtu(vport, mtu); + + return hclge_gen_resp_to_vf(vport, mbx_req, ret, NULL, 0); +} + +static int hclge_get_queue_id_in_pf(struct hclge_vport *vport, + struct hclge_mbx_vf_to_pf_cmd *mbx_req) +{ + u16 queue_id, qid_in_pf; + u8 resp_data[2]; + + memcpy(&queue_id, &mbx_req->msg[2], sizeof(queue_id)); + qid_in_pf = hclge_covert_handle_qid_global(&vport->nic, queue_id); + memcpy(resp_data, &qid_in_pf, sizeof(qid_in_pf)); + + return hclge_gen_resp_to_vf(vport, mbx_req, 0, resp_data, 2); } static bool hclge_cmd_crq_empty(struct hclge_hw *hw) @@ -460,6 +503,13 @@ void hclge_mbx_handler(struct hclge_dev *hdev) "PF failed(%d) to config VF's VLAN\n", ret); break; + case HCLGE_MBX_SET_ALIVE: + ret = hclge_set_vf_alive(vport, req, false); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to set VF's ALIVE\n", + ret); + break; case HCLGE_MBX_GET_QINFO: ret = hclge_get_vf_queue_info(vport, req, true); if (ret) @@ -487,6 +537,22 @@ void hclge_mbx_handler(struct hclge_dev *hdev) case HCLGE_MBX_RESET: hclge_reset_vf(vport, req); break; + case HCLGE_MBX_KEEP_ALIVE: + hclge_vf_keep_alive(vport, req); + break; + case HCLGE_MBX_SET_MTU: + ret = hclge_set_vf_mtu(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "VF fail(%d) to set mtu\n", ret); + break; + case HCLGE_MBX_GET_QID_IN_PF: + ret = hclge_get_queue_id_in_pf(vport, req); + if (ret) + dev_err(&hdev->pdev->dev, + "PF failed(%d) to get qid for VF\n", + ret); + break; default: 
dev_err(&hdev->pdev->dev, "un-supported mailbox message, code = %d\n", diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c index 03018638f701..dabb8437f8dc 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mdio.c @@ -12,7 +12,7 @@ SUPPORTED_TP | \ PHY_10BT_FEATURES | \ PHY_100BT_FEATURES | \ - PHY_1000BT_FEATURES) + SUPPORTED_1000baseT_Full) enum hclge_mdio_c22_op_seq { HCLGE_MDIO_C22_WRITE = 1, @@ -179,6 +179,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev) int duplex, speed; int ret; + /* When phy link down, do nothing */ + if (netdev->phydev->link == 0) + return; + speed = netdev->phydev->speed; duplex = netdev->phydev->duplex; @@ -195,12 +199,13 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) { struct net_device *netdev = hdev->vport[0].nic.netdev; struct phy_device *phydev = hdev->hw.mac.phydev; + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; int ret; if (!phydev) return 0; - phydev->supported &= ~SUPPORTED_FIBRE; + linkmode_clear_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, phydev->supported); ret = phy_connect_direct(netdev, phydev, hclge_mac_adjust_link, @@ -210,7 +215,15 @@ int hclge_mac_connect_phy(struct hclge_dev *hdev) return ret; } - phydev->supported &= HCLGE_PHY_SUPPORTED_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT, mask); + linkmode_set_bit_array(phy_10_100_features_array, + ARRAY_SIZE(phy_10_100_features_array), + mask); + linkmode_set_bit_array(phy_gbit_features_array, + ARRAY_SIZE(phy_gbit_features_array), + mask); + linkmode_and(phydev->supported, phydev->supported, mask); phy_support_asym_pause(phydev); return 0; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 494e562fe8c7..00458da67503 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -1259,15 +1259,13 @@ int hclge_pause_setup_hw(struct hclge_dev *hdev) return 0; } -int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) { struct hclge_vport *vport = hdev->vport; struct hnae3_knic_private_info *kinfo; u32 i, k; for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) { - if (prio_tc[i] >= hdev->tm_info.num_tc) - return -EINVAL; hdev->tm_info.prio_tc[i] = prio_tc[i]; for (k = 0; k < hdev->num_alloc_vport; k++) { @@ -1275,18 +1273,12 @@ int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc) kinfo->prio_tc[i] = prio_tc[i]; } } - return 0; } -int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) { u8 i, bit_map = 0; - for (i = 0; i < hdev->num_alloc_vport; i++) { - if (num_tc > hdev->vport[i].alloc_tqps) - return -EINVAL; - } - hdev->tm_info.num_tc = num_tc; for (i = 0; i < hdev->tm_info.num_tc; i++) @@ -1300,8 +1292,6 @@ int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc) hdev->hw_tc_map = bit_map; hclge_tm_schd_info_init(hdev); - - return 0; } int hclge_tm_init_hw(struct hclge_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h index 25eef13a3e14..b6496a439304 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h +++ 
b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.h @@ -40,6 +40,13 @@ struct hclge_nq_to_qs_link_cmd { __le16 qset_id; }; +struct hclge_tqp_tx_queue_tc_cmd { + __le16 queue_id; + __le16 rsvd; + u8 tc_id; + u8 rev[3]; +}; + struct hclge_pg_weight_cmd { u8 pg_id; u8 dwrr; @@ -55,6 +62,12 @@ struct hclge_qs_weight_cmd { u8 dwrr; }; +struct hclge_ets_tc_weight_cmd { + u8 tc_weight[HNAE3_MAX_TC]; + u8 weight_offset; + u8 rsvd[15]; +}; + #define HCLGE_TM_SHAP_IR_B_MSK GENMASK(7, 0) #define HCLGE_TM_SHAP_IR_B_LSH 0 #define HCLGE_TM_SHAP_IR_U_MSK GENMASK(11, 8) @@ -131,8 +144,8 @@ struct hclge_port_shapping_cmd { int hclge_tm_schd_init(struct hclge_dev *hdev); int hclge_pause_setup_hw(struct hclge_dev *hdev); int hclge_tm_schd_mode_hw(struct hclge_dev *hdev); -int hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); -int hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); +void hclge_tm_prio_tc_info_update(struct hclge_dev *hdev, u8 *prio_tc); +void hclge_tm_schd_info_update(struct hclge_dev *hdev, u8 num_tc); int hclge_tm_dwrr_cfg(struct hclge_dev *hdev); int hclge_tm_map_cfg(struct hclge_dev *hdev); int hclge_tm_init_hw(struct hclge_dev *hdev); diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c index 0d3b445f6799..d5765c8cf3a3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.c @@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode) return false; } +static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring) +{ + struct hclgevf_dev *hdev = ring->dev; + struct hclgevf_hw *hw = &hdev->hw; + u32 reg_val; + + if (ring->flag == HCLGEVF_TYPE_CSQ) { + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); + } else { + reg_val = (u32)ring->desc_dma_addr; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); + reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); + + reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); + reg_val |= HCLGEVF_NIC_CMQ_ENABLE; + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); + + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); + hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); + } +} + +static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw) +{ + hclgevf_cmd_config_regs(&hw->cmq.csq); + hclgevf_cmd_config_regs(&hw->cmq.crq); +} + static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring) { int size = ring->desc_num * sizeof(struct hclgevf_desc); @@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring) } } -static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev, - struct hclgevf_cmq_ring *ring) +static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type) { struct hclgevf_hw *hw = &hdev->hw; - int ring_type = ring->flag; - u32 reg_val; + struct hclgevf_cmq_ring *ring = + (ring_type == HCLGEVF_TYPE_CSQ) ? 
&hw->cmq.csq : &hw->cmq.crq; int ret; - ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - spin_lock_init(&ring->lock); - ring->next_to_clean = 0; - ring->next_to_use = 0; ring->dev = hdev; + ring->flag = ring_type; /* allocate CSQ/CRQ descriptor */ ret = hclgevf_alloc_cmd_desc(ring); - if (ret) { + if (ret) dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret, (ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ"); - return ret; - } - /* initialize the hardware registers with csq/crq dma-address, - * descriptor number, head & tail pointers - */ - switch (ring_type) { - case HCLGEVF_TYPE_CSQ: - reg_val = (u32)ring->desc_dma_addr; - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - reg_val |= HCLGEVF_NIC_CMQ_ENABLE; - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0); - return 0; - case HCLGEVF_TYPE_CRQ: - reg_val = (u32)ring->desc_dma_addr; - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val); - reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val); - - reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S); - reg_val |= HCLGEVF_NIC_CMQ_ENABLE; - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val); - - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0); - hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0); - return 0; - default: - return -EINVAL; - } + return ret; } void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, @@ -188,7 +189,8 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num) spin_lock_bh(&hw->cmq.csq.lock); - if (num > hclgevf_ring_space(&hw->cmq.csq)) { + if (num > hclgevf_ring_space(&hw->cmq.csq) || + test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { spin_unlock_bh(&hw->cmq.csq.lock); return -EBUSY; } @@ -282,55 +284,83 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw, return status; } -int hclgevf_cmd_init(struct hclgevf_dev *hdev) +int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev) { - u32 version; int ret; - /* setup Tx write back timeout */ + /* Setup the lock for command queue */ + spin_lock_init(&hdev->hw.cmq.csq.lock); + spin_lock_init(&hdev->hw.cmq.crq.lock); + hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT; + hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; + hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM; - /* setup queue CSQ/CRQ rings */ - hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ; - ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq); + ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to initialize CSQ ring\n", ret); + "CSQ ring setup error %d\n", ret); return ret; } - hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ; - ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq); + ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ); if (ret) { dev_err(&hdev->pdev->dev, - "failed(%d) to initialize CRQ ring\n", ret); + "CRQ ring setup error %d\n", ret); goto err_csq; } + return 0; +err_csq: + hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); + return ret; +} + +int hclgevf_cmd_init(struct hclgevf_dev *hdev) +{ + u32 version; + int ret; + + spin_lock_bh(&hdev->hw.cmq.csq.lock); + spin_lock_bh(&hdev->hw.cmq.crq.lock); + /* initialize the pointers of async rx queue 
of mailbox */ hdev->arq.hdev = hdev; hdev->arq.head = 0; hdev->arq.tail = 0; hdev->arq.count = 0; + hdev->hw.cmq.csq.next_to_clean = 0; + hdev->hw.cmq.csq.next_to_use = 0; + hdev->hw.cmq.crq.next_to_clean = 0; + hdev->hw.cmq.crq.next_to_use = 0; + + hclgevf_cmd_init_regs(&hdev->hw); + + spin_unlock_bh(&hdev->hw.cmq.crq.lock); + spin_unlock_bh(&hdev->hw.cmq.csq.lock); + + clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + /* Check if there is new reset pending, because the higher level + * reset may happen when lower level reset is being processed. + */ + if (hclgevf_is_reset_pending(hdev)) { + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + return -EBUSY; + } /* get firmware version */ ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version); if (ret) { dev_err(&hdev->pdev->dev, "failed(%d) to query firmware version\n", ret); - goto err_crq; + return ret; } hdev->fw_version = version; dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version); return 0; -err_crq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.crq); -err_csq: - hclgevf_free_cmd_desc(&hdev->hw.cmq.csq); - - return ret; } void hclgevf_cmd_uninit(struct hclgevf_dev *hdev) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h index bc294b0c8b62..47030b42341f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_cmd.h @@ -87,6 +87,8 @@ enum hclgevf_opcode_type { HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03, HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13, HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20, + /* GRO command */ + HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10, /* RSS cmd */ HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01, HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02, @@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd { __le16 rsv[7]; }; +#define HCLGEVF_GRO_EN_B 0 +struct hclgevf_cfg_gro_status_cmd { + __le16 gro_en; + u8 rsv[22]; +}; + #define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4 #define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4 #define HCLGEVF_RSS_HASH_KEY_NUM 16 @@ -256,6 +264,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg) int hclgevf_cmd_init(struct hclgevf_dev *hdev); void hclgevf_cmd_uninit(struct hclgevf_dev *hdev); +int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev); int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num); void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 085edb945389..82103d5fa815 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2,6 +2,7 @@ // Copyright (c) 2016-2017 Hisilicon Limited. 
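The hclgevf command-queue rework above splits the one-time descriptor allocation (hclgevf_cmd_queue_init) from the re-runnable pointer and register setup (hclgevf_cmd_init), so a reset can re-arm the queues without reallocating DMA memory. A plain-C sketch of that alloc-once/re-init-many split, using simplified stand-in types rather than the real hclgevf structures:

/* Sketch of the alloc-once / re-init-many split, modeled in
 * userspace C. struct cmq_ring is a stand-in for hclgevf_cmq_ring.
 */
#include <stdio.h>
#include <stdlib.h>

struct cmq_ring {
	unsigned int *desc;	/* stands in for the DMA descriptor area */
	int desc_num;
	int next_to_use;
	int next_to_clean;
};

/* one-time, like hclgevf_cmd_queue_init() */
static int ring_alloc(struct cmq_ring *ring, int num)
{
	ring->desc = calloc(num, sizeof(*ring->desc));
	if (!ring->desc)
		return -1;
	ring->desc_num = num;
	return 0;
}

/* repeatable, like the pointer setup in hclgevf_cmd_init() */
static void ring_reinit(struct cmq_ring *ring)
{
	ring->next_to_use = 0;
	ring->next_to_clean = 0;
	/* the real driver also rewrites the base-address, depth and
	 * head/tail registers here via hclgevf_cmd_init_regs() */
}

int main(void)
{
	struct cmq_ring csq;

	if (ring_alloc(&csq, 1024))
		return 1;

	ring_reinit(&csq);	/* initial bring-up */
	csq.next_to_use = 17;	/* ...traffic happens... */
	ring_reinit(&csq);	/* reset path: no realloc needed */

	printf("after reset: ntu=%d ntc=%d\n",
	       csq.next_to_use, csq.next_to_clean);
	free(csq.desc);
	return 0;
}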
#include <linux/etherdevice.h> +#include <linux/iopoll.h> #include <net/rtnetlink.h> #include "hclgevf_cmd.h" #include "hclgevf_main.h" @@ -10,8 +11,7 @@ #define HCLGEVF_NAME "hclgevf" -static int hclgevf_init_hdev(struct hclgevf_dev *hdev); -static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev); +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev); static struct hnae3_ae_algo ae_algovf; static const struct pci_device_id ae_algovf_pci_tbl[] = { @@ -23,6 +23,58 @@ static const struct pci_device_id ae_algovf_pci_tbl[] = { MODULE_DEVICE_TABLE(pci, ae_algovf_pci_tbl); +static const u32 cmdq_reg_addr_list[] = {HCLGEVF_CMDQ_TX_ADDR_L_REG, + HCLGEVF_CMDQ_TX_ADDR_H_REG, + HCLGEVF_CMDQ_TX_DEPTH_REG, + HCLGEVF_CMDQ_TX_TAIL_REG, + HCLGEVF_CMDQ_TX_HEAD_REG, + HCLGEVF_CMDQ_RX_ADDR_L_REG, + HCLGEVF_CMDQ_RX_ADDR_H_REG, + HCLGEVF_CMDQ_RX_DEPTH_REG, + HCLGEVF_CMDQ_RX_TAIL_REG, + HCLGEVF_CMDQ_RX_HEAD_REG, + HCLGEVF_VECTOR0_CMDQ_SRC_REG, + HCLGEVF_CMDQ_INTR_STS_REG, + HCLGEVF_CMDQ_INTR_EN_REG, + HCLGEVF_CMDQ_INTR_GEN_REG}; + +static const u32 common_reg_addr_list[] = {HCLGEVF_MISC_VECTOR_REG_BASE, + HCLGEVF_RST_ING, + HCLGEVF_GRO_EN_REG}; + +static const u32 ring_reg_addr_list[] = {HCLGEVF_RING_RX_ADDR_L_REG, + HCLGEVF_RING_RX_ADDR_H_REG, + HCLGEVF_RING_RX_BD_NUM_REG, + HCLGEVF_RING_RX_BD_LENGTH_REG, + HCLGEVF_RING_RX_MERGE_EN_REG, + HCLGEVF_RING_RX_TAIL_REG, + HCLGEVF_RING_RX_HEAD_REG, + HCLGEVF_RING_RX_FBD_NUM_REG, + HCLGEVF_RING_RX_OFFSET_REG, + HCLGEVF_RING_RX_FBD_OFFSET_REG, + HCLGEVF_RING_RX_STASH_REG, + HCLGEVF_RING_RX_BD_ERR_REG, + HCLGEVF_RING_TX_ADDR_L_REG, + HCLGEVF_RING_TX_ADDR_H_REG, + HCLGEVF_RING_TX_BD_NUM_REG, + HCLGEVF_RING_TX_PRIORITY_REG, + HCLGEVF_RING_TX_TC_REG, + HCLGEVF_RING_TX_MERGE_EN_REG, + HCLGEVF_RING_TX_TAIL_REG, + HCLGEVF_RING_TX_HEAD_REG, + HCLGEVF_RING_TX_FBD_NUM_REG, + HCLGEVF_RING_TX_OFFSET_REG, + HCLGEVF_RING_TX_EBD_NUM_REG, + HCLGEVF_RING_TX_EBD_OFFSET_REG, + HCLGEVF_RING_TX_BD_ERR_REG, + HCLGEVF_RING_EN_REG}; + +static const u32 tqp_intr_reg_addr_list[] = {HCLGEVF_TQP_INTR_CTRL_REG, + HCLGEVF_TQP_INTR_GL0_REG, + HCLGEVF_TQP_INTR_GL1_REG, + HCLGEVF_TQP_INTR_GL2_REG, + HCLGEVF_TQP_INTR_RL_REG}; + static inline struct hclgevf_dev *hclgevf_ae_get_hdev( struct hnae3_handle *handle) { @@ -204,17 +256,28 @@ static int hclgevf_get_queue_info(struct hclgevf_dev *hdev) return 0; } +static u16 hclgevf_get_qid_global(struct hnae3_handle *handle, u16 queue_id) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data[2], resp_data[2]; + u16 qid_in_pf = 0; + int ret; + + memcpy(&msg_data[0], &queue_id, sizeof(queue_id)); + + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_GET_QID_IN_PF, 0, msg_data, + 2, true, resp_data, 2); + if (!ret) + qid_in_pf = *(u16 *)resp_data; + + return qid_in_pf; +} + static int hclgevf_alloc_tqps(struct hclgevf_dev *hdev) { struct hclgevf_tqp *tqp; int i; - /* if this is on going reset then we need to re-allocate the TPQs - * since we cannot assume we would get same number of TPQs back from PF - */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, hdev->htqp); - hdev->htqp = devm_kcalloc(&hdev->pdev->dev, hdev->num_tqps, sizeof(struct hclgevf_tqp), GFP_KERNEL); if (!hdev->htqp) @@ -258,12 +321,6 @@ static int hclgevf_knic_setup(struct hclgevf_dev *hdev) new_tqps = kinfo->rss_size * kinfo->num_tc; kinfo->num_tqps = min(new_tqps, hdev->num_tqps); - /* if this is on going reset then we need to re-allocate the hnae queues - * as well since number of TPQs from PF might have changed. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - devm_kfree(&hdev->pdev->dev, kinfo->tqp); - kinfo->tqp = devm_kcalloc(&hdev->pdev->dev, kinfo->num_tqps, sizeof(struct hnae3_queue *), GFP_KERNEL); if (!kinfo->tqp) @@ -868,6 +925,9 @@ static int hclgevf_unmap_ring_from_vector( struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); int ret, vector_id; + if (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) + return 0; + vector_id = hclgevf_get_vector_index(hdev, vector); if (vector_id < 0) { dev_err(&handle->pdev->dev, @@ -956,13 +1016,6 @@ static int hclgevf_tqp_enable(struct hclgevf_dev *hdev, int tqp_id, return status; } -static int hclgevf_get_queue_id(struct hnae3_queue *queue) -{ - struct hclgevf_tqp *tqp = container_of(queue, struct hclgevf_tqp, q); - - return tqp->index; -} - static void hclgevf_reset_tqp_stats(struct hnae3_handle *handle) { struct hnae3_knic_private_info *kinfo = &handle->kinfo; @@ -1097,38 +1150,87 @@ static int hclgevf_reset_tqp(struct hnae3_handle *handle, u16 queue_id) 2, true, NULL, 0); } +static int hclgevf_set_mtu(struct hnae3_handle *handle, int new_mtu) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_MTU, 0, (u8 *)&new_mtu, + sizeof(new_mtu), true, NULL, 0); +} + static int hclgevf_notify_client(struct hclgevf_dev *hdev, enum hnae3_reset_notify_type type) { struct hnae3_client *client = hdev->nic_client; struct hnae3_handle *handle = &hdev->nic; + int ret; if (!client->ops->reset_notify) return -EOPNOTSUPP; - return client->ops->reset_notify(handle, type); + ret = client->ops->reset_notify(handle, type); + if (ret) + dev_err(&hdev->pdev->dev, "notify nic client failed %d(%d)\n", + type, ret); + + return ret; +} + +static void hclgevf_flr_done(struct hnae3_ae_dev *ae_dev) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + set_bit(HNAE3_FLR_DONE, &hdev->flr_state); +} + +static int hclgevf_flr_poll_timeout(struct hclgevf_dev *hdev, + unsigned long delay_us, + unsigned long wait_cnt) +{ + unsigned long cnt = 0; + + while (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state) && + cnt++ < wait_cnt) + usleep_range(delay_us, delay_us * 2); + + if (!test_bit(HNAE3_FLR_DONE, &hdev->flr_state)) { + dev_err(&hdev->pdev->dev, + "flr wait timeout\n"); + return -ETIMEDOUT; + } + + return 0; } static int hclgevf_reset_wait(struct hclgevf_dev *hdev) { -#define HCLGEVF_RESET_WAIT_MS 500 -#define HCLGEVF_RESET_WAIT_CNT 20 - u32 val, cnt = 0; +#define HCLGEVF_RESET_WAIT_US 20000 +#define HCLGEVF_RESET_WAIT_CNT 2000 +#define HCLGEVF_RESET_WAIT_TIMEOUT_US \ + (HCLGEVF_RESET_WAIT_US * HCLGEVF_RESET_WAIT_CNT) + + u32 val; + int ret; /* wait to check the hardware reset completion status */ - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - while (hnae3_get_bit(val, HCLGEVF_FUN_RST_ING_B) && - (cnt < HCLGEVF_RESET_WAIT_CNT)) { - msleep(HCLGEVF_RESET_WAIT_MS); - val = hclgevf_read_dev(&hdev->hw, HCLGEVF_FUN_RST_ING); - cnt++; - } + val = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, "checking vf resetting status: %x\n", val); + + if (hdev->reset_type == HNAE3_FLR_RESET) + return hclgevf_flr_poll_timeout(hdev, + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_CNT); + + ret = readl_poll_timeout(hdev->hw.io_base + HCLGEVF_RST_ING, val, + !(val & HCLGEVF_RST_ING_BITS), + HCLGEVF_RESET_WAIT_US, + HCLGEVF_RESET_WAIT_TIMEOUT_US); /* hardware completion status should be available by this time */ - if (cnt >= HCLGEVF_RESET_WAIT_CNT) { - dev_warn(&hdev->pdev->dev, - "could'nt get reset done status from 
h/w, timeout!\n"); - return -EBUSY; + if (ret) { + dev_err(&hdev->pdev->dev, + "couldn't get reset done status from h/w, timeout!\n"); + return ret; } /* we will wait a bit more to let reset of the stack to complete. This @@ -1145,10 +1247,12 @@ static int hclgevf_reset_stack(struct hclgevf_dev *hdev) int ret; /* uninitialize the nic client */ - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); + if (ret) + return ret; /* re-initialize the hclge device */ - ret = hclgevf_init_hdev(hdev); + ret = hclgevf_reset_hdev(hdev); if (ret) { dev_err(&hdev->pdev->dev, "hclge device re-init failed, VF is disabled!\n"); @@ -1156,22 +1260,60 @@ } /* bring up the nic client again */ - hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_INIT_CLIENT); + if (ret) + return ret; return 0; } +static int hclgevf_reset_prepare_wait(struct hclgevf_dev *hdev) +{ + int ret = 0; + + switch (hdev->reset_type) { + case HNAE3_VF_FUNC_RESET: + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, + 0, true, NULL, sizeof(u8)); + break; + case HNAE3_FLR_RESET: + set_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + break; + default: + break; + } + + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + + dev_info(&hdev->pdev->dev, "prepare reset(%d) wait done, ret:%d\n", + hdev->reset_type, ret); + + return ret; +} + static int hclgevf_reset(struct hclgevf_dev *hdev) { + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev); int ret; + /* Initialize ae_dev reset status as well, in case enet layer wants to + * know if device is undergoing reset + */ + ae_dev->reset_type = hdev->reset_type; + hdev->reset_count++; rtnl_lock(); /* bring down the nic to stop any ongoing TX/RX */ - hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_DOWN_CLIENT); + if (ret) + goto err_reset_lock; rtnl_unlock(); + ret = hclgevf_reset_prepare_wait(hdev); + if (ret) + goto err_reset; + /* check if VF could successfully fetch the hardware reset completion * status from the hardware */ @@ -1181,58 +1323,121 @@ dev_err(&hdev->pdev->dev, "VF failed(=%d) to fetch H/W reset completion status\n", ret); - - dev_warn(&hdev->pdev->dev, "VF reset failed, disabling VF!\n"); - rtnl_lock(); - hclgevf_notify_client(hdev, HNAE3_UNINIT_CLIENT); - - rtnl_unlock(); - return ret; + goto err_reset; } rtnl_lock(); /* now, re-initialize the nic client and ae device*/ ret = hclgevf_reset_stack(hdev); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "failed to reset VF stack\n"); + goto err_reset_lock; + } /* bring up the nic to enable TX/RX again */ - hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + ret = hclgevf_notify_client(hdev, HNAE3_UP_CLIENT); + if (ret) + goto err_reset_lock; rtnl_unlock(); + hdev->last_reset_time = jiffies; + ae_dev->reset_type = HNAE3_NONE_RESET; + return ret; -} +err_reset_lock: + rtnl_unlock(); +err_reset: + /* When a VF reset fails, only the higher level reset asserted by the + * PF can restore it, so re-initialize the command queue to receive + * this higher reset event.
+ */ + hclgevf_cmd_init(hdev); + dev_err(&hdev->pdev->dev, "failed to reset VF\n"); -static int hclgevf_do_reset(struct hclgevf_dev *hdev) -{ - int status; - u8 respmsg; + return ret; +} - status = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_RESET, 0, NULL, - 0, false, &respmsg, sizeof(u8)); - if (status) - dev_err(&hdev->pdev->dev, - "VF reset request to PF failed(=%d)\n", status); +static enum hnae3_reset_type hclgevf_get_reset_level(struct hclgevf_dev *hdev, + unsigned long *addr) +{ + enum hnae3_reset_type rst_level = HNAE3_NONE_RESET; + + /* return the highest priority reset level amongst all */ + if (test_bit(HNAE3_VF_RESET, addr)) { + rst_level = HNAE3_VF_RESET; + clear_bit(HNAE3_VF_RESET, addr); + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FULL_RESET, addr)) { + rst_level = HNAE3_VF_FULL_RESET; + clear_bit(HNAE3_VF_FULL_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_PF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_PF_FUNC_RESET; + clear_bit(HNAE3_VF_PF_FUNC_RESET, addr); + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_VF_FUNC_RESET, addr)) { + rst_level = HNAE3_VF_FUNC_RESET; + clear_bit(HNAE3_VF_FUNC_RESET, addr); + } else if (test_bit(HNAE3_FLR_RESET, addr)) { + rst_level = HNAE3_FLR_RESET; + clear_bit(HNAE3_FLR_RESET, addr); + } - return status; + return rst_level; } static void hclgevf_reset_event(struct pci_dev *pdev, struct hnae3_handle *handle) { - struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev); + struct hclgevf_dev *hdev = ae_dev->priv; dev_info(&hdev->pdev->dev, "received reset request from VF enet\n"); - handle->reset_level = HNAE3_VF_RESET; + if (hdev->default_reset_request) + hdev->reset_level = + hclgevf_get_reset_level(hdev, + &hdev->default_reset_request); + else + hdev->reset_level = HNAE3_VF_FUNC_RESET; /* reset of this VF requested */ set_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); - handle->last_reset_time = jiffies; + hdev->last_reset_time = jiffies; +} + +static void hclgevf_set_def_reset_request(struct hnae3_ae_dev *ae_dev, + enum hnae3_reset_type rst_type) +{ + struct hclgevf_dev *hdev = ae_dev->priv; + + set_bit(rst_type, &hdev->default_reset_request); +} + +static void hclgevf_flr_prepare(struct hnae3_ae_dev *ae_dev) +{ +#define HCLGEVF_FLR_WAIT_MS 100 +#define HCLGEVF_FLR_WAIT_CNT 50 + struct hclgevf_dev *hdev = ae_dev->priv; + int cnt = 0; + + clear_bit(HNAE3_FLR_DOWN, &hdev->flr_state); + clear_bit(HNAE3_FLR_DONE, &hdev->flr_state); + set_bit(HNAE3_FLR_RESET, &hdev->default_reset_request); + hclgevf_reset_event(hdev->pdev, NULL); + + while (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state) && + cnt++ < HCLGEVF_FLR_WAIT_CNT) + msleep(HCLGEVF_FLR_WAIT_MS); + + if (!test_bit(HNAE3_FLR_DOWN, &hdev->flr_state)) + dev_err(&hdev->pdev->dev, + "flr wait down timeout: %d\n", cnt); } static u32 hclgevf_get_fw_version(struct hnae3_handle *handle) @@ -1321,9 +1526,15 @@ static void hclgevf_reset_service_task(struct work_struct *work) */ hdev->reset_attempts = 0; - ret = hclgevf_reset(hdev); - if (ret) - dev_err(&hdev->pdev->dev, "VF stack reset failed.\n"); + hdev->last_reset_time = jiffies; + while ((hdev->reset_type = + hclgevf_get_reset_level(hdev, &hdev->reset_pending)) + != HNAE3_NONE_RESET) { + ret = hclgevf_reset(hdev); + if (ret) + dev_err(&hdev->pdev->dev, + "VF stack reset failed %d.\n", ret); + } } else if 
(test_and_clear_bit(HCLGEVF_RESET_REQUESTED, &hdev->reset_state)) { /* we could be here when either of below happens: @@ -1352,19 +1563,17 @@ static void hclgevf_reset_service_task(struct work_struct *work) */ if (hdev->reset_attempts > 3) { /* prepare for full reset of stack + pcie interface */ - hdev->nic.reset_level = HNAE3_VF_FULL_RESET; + set_bit(HNAE3_VF_FULL_RESET, &hdev->reset_pending); /* "defer" schedule the reset task again */ set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } else { hdev->reset_attempts++; - /* request PF for resetting this VF via mailbox */ - ret = hclgevf_do_reset(hdev); - if (ret) - dev_warn(&hdev->pdev->dev, - "VF rst fail, stack will call\n"); + set_bit(hdev->reset_level, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); } + hclgevf_reset_task_schedule(hdev); } clear_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); @@ -1386,6 +1595,28 @@ static void hclgevf_mailbox_service_task(struct work_struct *work) clear_bit(HCLGEVF_STATE_MBX_HANDLING, &hdev->state); } +static void hclgevf_keep_alive_timer(struct timer_list *t) +{ + struct hclgevf_dev *hdev = from_timer(hdev, t, keep_alive_timer); + + schedule_work(&hdev->keep_alive_task); + mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); +} + +static void hclgevf_keep_alive_task(struct work_struct *work) +{ + struct hclgevf_dev *hdev; + u8 respmsg; + int ret; + + hdev = container_of(work, struct hclgevf_dev, keep_alive_task); + ret = hclgevf_send_mbx_msg(hdev, HCLGE_MBX_KEEP_ALIVE, 0, NULL, + 0, false, &respmsg, sizeof(u8)); + if (ret) + dev_err(&hdev->pdev->dev, + "VF sends keep alive cmd failed(=%d)\n", ret); +} + static void hclgevf_service_task(struct work_struct *work) { struct hclgevf_dev *hdev; @@ -1407,24 +1638,37 @@ static void hclgevf_clear_event_cause(struct hclgevf_dev *hdev, u32 regclr) hclgevf_write_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG, regclr); } -static bool hclgevf_check_event_cause(struct hclgevf_dev *hdev, u32 *clearval) +static enum hclgevf_evt_cause hclgevf_check_evt_cause(struct hclgevf_dev *hdev, + u32 *clearval) { - u32 cmdq_src_reg; + u32 cmdq_src_reg, rst_ing_reg; /* fetch the events from their corresponding regs */ cmdq_src_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_VECTOR0_CMDQ_SRC_REG); + if (BIT(HCLGEVF_VECTOR0_RST_INT_B) & cmdq_src_reg) { + rst_ing_reg = hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); + dev_info(&hdev->pdev->dev, + "receive reset interrupt 0x%x!\n", rst_ing_reg); + set_bit(HNAE3_VF_RESET, &hdev->reset_pending); + set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); + set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state); + cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RST_INT_B); + *clearval = cmdq_src_reg; + return HCLGEVF_VECTOR0_EVENT_RST; + } + /* check for vector0 mailbox(=CMDQ RX) event source */ if (BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B) & cmdq_src_reg) { cmdq_src_reg &= ~BIT(HCLGEVF_VECTOR0_RX_CMDQ_INT_B); *clearval = cmdq_src_reg; - return true; + return HCLGEVF_VECTOR0_EVENT_MBX; } dev_dbg(&hdev->pdev->dev, "vector 0 interrupt from unknown source\n"); - return false; + return HCLGEVF_VECTOR0_EVENT_OTHER; } static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) @@ -1434,19 +1678,28 @@ static void hclgevf_enable_vector(struct hclgevf_misc_vector *vector, bool en) static irqreturn_t hclgevf_misc_irq_handle(int irq, void *data) { + enum hclgevf_evt_cause event_cause; struct hclgevf_dev *hdev = data; u32 clearval; hclgevf_enable_vector(&hdev->misc_vector, false); - if (!hclgevf_check_event_cause(hdev, &clearval)) - goto 
skip_sched; - - hclgevf_mbx_handler(hdev); + event_cause = hclgevf_check_evt_cause(hdev, &clearval); - hclgevf_clear_event_cause(hdev, clearval); + switch (event_cause) { + case HCLGEVF_VECTOR0_EVENT_RST: + hclgevf_reset_task_schedule(hdev); + break; + case HCLGEVF_VECTOR0_EVENT_MBX: + hclgevf_mbx_handler(hdev); + break; + default: + break; + } -skip_sched: - hclgevf_enable_vector(&hdev->misc_vector, true); + if (event_cause != HCLGEVF_VECTOR0_EVENT_OTHER) { + hclgevf_clear_event_cause(hdev, clearval); + hclgevf_enable_vector(&hdev->misc_vector, true); + } return IRQ_HANDLED; } @@ -1468,7 +1721,7 @@ static int hclgevf_configure(struct hclgevf_dev *hdev) static int hclgevf_alloc_hdev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; - struct hclgevf_dev *hdev = ae_dev->priv; + struct hclgevf_dev *hdev; hdev = devm_kzalloc(&pdev->dev, sizeof(*hdev), GFP_KERNEL); if (!hdev) @@ -1504,6 +1757,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev) return 0; } +static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en) +{ + struct hclgevf_cfg_gro_status_cmd *req; + struct hclgevf_desc desc; + int ret; + + if (!hnae3_dev_gro_supported(hdev)) + return 0; + + hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG, + false); + req = (struct hclgevf_cfg_gro_status_cmd *)desc.data; + + req->gro_en = cpu_to_le16(en ? 1 : 0); + + ret = hclgevf_cmd_send(&hdev->hw, &desc, 1); + if (ret) + dev_err(&hdev->pdev->dev, + "VF GRO hardware config cmd failed, ret = %d.\n", ret); + + return ret; +} + static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) { struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg; @@ -1564,23 +1840,22 @@ static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) false); } -static int hclgevf_ae_start(struct hnae3_handle *handle) +static void hclgevf_set_timer_task(struct hnae3_handle *handle, bool enable) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, queue_id; - for (i = 0; i < kinfo->num_tqps; i++) { - /* ring enable */ - queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } - - hclgevf_tqp_enable(hdev, queue_id, 0, true); + if (enable) { + mod_timer(&hdev->service_timer, jiffies + HZ); + } else { + del_timer_sync(&hdev->service_timer); + cancel_work_sync(&hdev->service_task); + clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); } +} + +static int hclgevf_ae_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); /* reset tqp stats */ hclgevf_reset_tqp_stats(handle); @@ -1588,45 +1863,59 @@ static int hclgevf_ae_start(struct hnae3_handle *handle) hclgevf_request_link_info(hdev); clear_bit(HCLGEVF_STATE_DOWN, &hdev->state); - mod_timer(&hdev->service_timer, jiffies + HZ); return 0; } static void hclgevf_ae_stop(struct hnae3_handle *handle) { - struct hnae3_knic_private_info *kinfo = &handle->kinfo; struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); - int i, queue_id; + int i; set_bit(HCLGEVF_STATE_DOWN, &hdev->state); - for (i = 0; i < kinfo->num_tqps; i++) { - /* Ring disable */ - queue_id = hclgevf_get_queue_id(kinfo->tqp[i]); - if (queue_id < 0) { - dev_warn(&hdev->pdev->dev, - "Get invalid queue id, ignore it\n"); - continue; - } - - hclgevf_tqp_enable(hdev, queue_id, 0, false); - } + for (i = 0; i < handle->kinfo.num_tqps; i++) + hclgevf_reset_tqp(handle, i); /* reset tqp stats */ 
hclgevf_reset_tqp_stats(handle); - del_timer_sync(&hdev->service_timer); - cancel_work_sync(&hdev->service_task); - clear_bit(HCLGEVF_STATE_SERVICE_SCHED, &hdev->state); hclgevf_update_link_status(hdev, 0); } -static void hclgevf_state_init(struct hclgevf_dev *hdev) +static int hclgevf_set_alive(struct hnae3_handle *handle, bool alive) { - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + u8 msg_data; + + msg_data = alive ? 1 : 0; + return hclgevf_send_mbx_msg(hdev, HCLGE_MBX_SET_ALIVE, + 0, &msg_data, 1, false, NULL, 0); +} + +static int hclgevf_client_start(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + mod_timer(&hdev->keep_alive_timer, jiffies + 2 * HZ); + return hclgevf_set_alive(handle, true); +} + +static void hclgevf_client_stop(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int ret; + + ret = hclgevf_set_alive(handle, false); + if (ret) + dev_warn(&hdev->pdev->dev, + "%s failed %d\n", __func__, ret); + del_timer_sync(&hdev->keep_alive_timer); + cancel_work_sync(&hdev->keep_alive_task); +} + +static void hclgevf_state_init(struct hclgevf_dev *hdev) +{ /* setup tasks for the MBX */ INIT_WORK(&hdev->mbx_service_task, hclgevf_mailbox_service_task); clear_bit(HCLGEVF_STATE_MBX_SERVICE_SCHED, &hdev->state); @@ -1668,10 +1957,6 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) int vectors; int i; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - if (hnae3_get_bit(hdev->ae_dev->flag, HNAE3_DEV_SUPPORT_ROCE_B)) vectors = pci_alloc_irq_vectors(pdev, hdev->roce_base_msix_offset + 1, @@ -1710,6 +1995,7 @@ static int hclgevf_init_msi(struct hclgevf_dev *hdev) hdev->vector_irq = devm_kcalloc(&pdev->dev, hdev->num_msi, sizeof(int), GFP_KERNEL); if (!hdev->vector_irq) { + devm_kfree(&pdev->dev, hdev->vector_status); pci_free_irq_vectors(pdev); return -ENOMEM; } @@ -1721,6 +2007,8 @@ static void hclgevf_uninit_msi(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; + devm_kfree(&pdev->dev, hdev->vector_status); + devm_kfree(&pdev->dev, hdev->vector_irq); pci_free_irq_vectors(pdev); } @@ -1728,10 +2016,6 @@ static int hclgevf_misc_irq_init(struct hclgevf_dev *hdev) { int ret = 0; - /* if this is on going reset then skip this initialization */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - hclgevf_get_misc_vector(hdev); ret = request_irq(hdev->misc_vector.vector_irq, hclgevf_misc_irq_handle, @@ -1861,14 +2145,6 @@ static int hclgevf_pci_init(struct hclgevf_dev *hdev) struct hclgevf_hw *hw; int ret; - /* check if we need to skip initialization of pci. This will happen if - * device is undergoing VF reset. Otherwise, we would need to - * re-initialize pci interface again i.e. when device is not going - * through *any* reset or actually undergoing full reset. 
- */ - if (hclgevf_dev_ongoing_reset(hdev)) - return 0; - ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "failed to enable PCI device\n"); @@ -1957,23 +2233,98 @@ static int hclgevf_query_vf_resource(struct hclgevf_dev *hdev) return 0; } -static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +static int hclgevf_pci_reset(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret = 0; + + if (hdev->reset_type == HNAE3_VF_FULL_RESET && + test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + if (!test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + pci_set_master(pdev); + ret = hclgevf_init_msi(hdev); + if (ret) { + dev_err(&pdev->dev, + "failed(%d) to init MSI/MSI-X\n", ret); + return ret; + } + + ret = hclgevf_misc_irq_init(hdev); + if (ret) { + hclgevf_uninit_msi(hdev); + dev_err(&pdev->dev, "failed(%d) to init Misc IRQ(vector0)\n", + ret); + return ret; + } + + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + } + + return ret; +} + +static int hclgevf_reset_hdev(struct hclgevf_dev *hdev) { struct pci_dev *pdev = hdev->pdev; int ret; - /* check if device is on-going full reset(i.e. pcie as well) */ - if (hclgevf_dev_ongoing_full_reset(hdev)) { - dev_warn(&pdev->dev, "device is going full reset\n"); - hclgevf_uninit_hdev(hdev); + ret = hclgevf_pci_reset(hdev); + if (ret) { + dev_err(&pdev->dev, "pci reset failed %d\n", ret); + return ret; + } + + ret = hclgevf_cmd_init(hdev); + if (ret) { + dev_err(&pdev->dev, "cmd failed %d\n", ret); + return ret; } + ret = hclgevf_rss_init_hw(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize RSS\n", ret); + return ret; + } + + ret = hclgevf_config_gro(hdev, true); + if (ret) + return ret; + + ret = hclgevf_init_vlan_config(hdev); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed(%d) to initialize VLAN config\n", ret); + return ret; + } + + dev_info(&hdev->pdev->dev, "Reset done\n"); + + return 0; +} + +static int hclgevf_init_hdev(struct hclgevf_dev *hdev) +{ + struct pci_dev *pdev = hdev->pdev; + int ret; + ret = hclgevf_pci_init(hdev); if (ret) { dev_err(&pdev->dev, "PCI initialization failed\n"); return ret; } + ret = hclgevf_cmd_queue_init(hdev); + if (ret) { + dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret); + goto err_cmd_queue_init; + } + ret = hclgevf_cmd_init(hdev); if (ret) goto err_cmd_init; @@ -1983,16 +2334,17 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) if (ret) { dev_err(&hdev->pdev->dev, "Query vf status error, ret = %d.\n", ret); - goto err_query_vf; + goto err_cmd_init; } ret = hclgevf_init_msi(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret); - goto err_query_vf; + goto err_cmd_init; } hclgevf_state_init(hdev); + hdev->reset_level = HNAE3_VF_FUNC_RESET; ret = hclgevf_misc_irq_init(hdev); if (ret) { @@ -2001,6 +2353,8 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_misc_irq_init; } + set_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); + ret = hclgevf_configure(hdev); if (ret) { dev_err(&pdev->dev, "failed(%d) to fetch configuration\n", ret); @@ -2019,6 +2373,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + ret = hclgevf_config_gro(hdev, true); + if (ret) + goto err_config; + /* Initialize RSS for this VF */ ret = hclgevf_rss_init_hw(hdev); if (ret) { @@ -2034,6 +2392,7 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev) goto err_config; } + 
hdev->last_reset_time = jiffies; pr_info("finished initializing %s driver\n", HCLGEVF_DRIVER_NAME); return 0; @@ -2043,25 +2402,31 @@ err_config: err_misc_irq_init: hclgevf_state_uninit(hdev); hclgevf_uninit_msi(hdev); -err_query_vf: - hclgevf_cmd_uninit(hdev); err_cmd_init: + hclgevf_cmd_uninit(hdev); +err_cmd_queue_init: hclgevf_pci_uninit(hdev); + clear_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state); return ret; } static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev) { hclgevf_state_uninit(hdev); - hclgevf_misc_irq_uninit(hdev); - hclgevf_cmd_uninit(hdev); - hclgevf_uninit_msi(hdev); + + if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) { + hclgevf_misc_irq_uninit(hdev); + hclgevf_uninit_msi(hdev); + } + hclgevf_pci_uninit(hdev); + hclgevf_cmd_uninit(hdev); } static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) { struct pci_dev *pdev = ae_dev->pdev; + struct hclgevf_dev *hdev; int ret; ret = hclgevf_alloc_hdev(ae_dev); @@ -2071,10 +2436,16 @@ static int hclgevf_init_ae_dev(struct hnae3_ae_dev *ae_dev) } ret = hclgevf_init_hdev(ae_dev->priv); - if (ret) + if (ret) { dev_err(&pdev->dev, "hclge device initialization failed\n"); + return ret; + } - return ret; + hdev = ae_dev->priv; + timer_setup(&hdev->keep_alive_timer, hclgevf_keep_alive_timer, 0); + INIT_WORK(&hdev->keep_alive_task, hclgevf_keep_alive_task); + + return 0; } static void hclgevf_uninit_ae_dev(struct hnae3_ae_dev *ae_dev) @@ -2151,6 +2522,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed, hdev->hw.mac.duplex = duplex; } +static int hclgevf_gro_en(struct hnae3_handle *handle, int enable) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hclgevf_config_gro(hdev, enable); +} + static void hclgevf_get_media_type(struct hnae3_handle *handle, u8 *media_type) { @@ -2159,13 +2537,104 @@ static void hclgevf_get_media_type(struct hnae3_handle *handle, *media_type = hdev->hw.mac.media_type; } +static bool hclgevf_get_hw_reset_stat(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return !!hclgevf_read_dev(&hdev->hw, HCLGEVF_RST_ING); +} + +static bool hclgevf_ae_dev_resetting(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state); +} + +static unsigned long hclgevf_ae_dev_reset_cnt(struct hnae3_handle *handle) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + return hdev->reset_count; +} + +#define MAX_SEPARATE_NUM 4 +#define SEPARATOR_VALUE 0xFFFFFFFF +#define REG_NUM_PER_LINE 4 +#define REG_LEN_PER_LINE (REG_NUM_PER_LINE * sizeof(u32)) + +static int hclgevf_get_regs_len(struct hnae3_handle *handle) +{ + int cmdq_lines, common_lines, ring_lines, tqp_intr_lines; + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + + cmdq_lines = sizeof(cmdq_reg_addr_list) / REG_LEN_PER_LINE + 1; + common_lines = sizeof(common_reg_addr_list) / REG_LEN_PER_LINE + 1; + ring_lines = sizeof(ring_reg_addr_list) / REG_LEN_PER_LINE + 1; + tqp_intr_lines = sizeof(tqp_intr_reg_addr_list) / REG_LEN_PER_LINE + 1; + + return (cmdq_lines + common_lines + ring_lines * hdev->num_tqps + + tqp_intr_lines * (hdev->num_msi_used - 1)) * REG_LEN_PER_LINE; +} + +static void hclgevf_get_regs(struct hnae3_handle *handle, u32 *version, + void *data) +{ + struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle); + int i, j, reg_um, separator_num; + u32 *reg = data; + + *version = hdev->fw_version; + + /* fetching per-VF registers values from VF PCIe register 
space */ + reg_um = sizeof(cmdq_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, cmdq_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(common_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, common_reg_addr_list[i]); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + + reg_um = sizeof(ring_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_tqps; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + ring_reg_addr_list[i] + + 0x200 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } + + reg_um = sizeof(tqp_intr_reg_addr_list) / sizeof(u32); + separator_num = MAX_SEPARATE_NUM - reg_um % REG_NUM_PER_LINE; + for (j = 0; j < hdev->num_msi_used - 1; j++) { + for (i = 0; i < reg_um; i++) + *reg++ = hclgevf_read_dev(&hdev->hw, + tqp_intr_reg_addr_list[i] + + 4 * j); + for (i = 0; i < separator_num; i++) + *reg++ = SEPARATOR_VALUE; + } +} + static const struct hnae3_ae_ops hclgevf_ops = { .init_ae_dev = hclgevf_init_ae_dev, .uninit_ae_dev = hclgevf_uninit_ae_dev, + .flr_prepare = hclgevf_flr_prepare, + .flr_done = hclgevf_flr_done, .init_client_instance = hclgevf_init_client_instance, .uninit_client_instance = hclgevf_uninit_client_instance, .start = hclgevf_ae_start, .stop = hclgevf_ae_stop, + .client_start = hclgevf_client_start, + .client_stop = hclgevf_client_stop, .map_ring_to_vector = hclgevf_map_ring_to_vector, .unmap_ring_from_vector = hclgevf_unmap_ring_from_vector, .get_vector = hclgevf_get_vector, @@ -2193,11 +2662,21 @@ static const struct hnae3_ae_ops hclgevf_ops = { .set_vlan_filter = hclgevf_set_vlan_filter, .enable_hw_strip_rxvtag = hclgevf_en_hw_strip_rxvtag, .reset_event = hclgevf_reset_event, + .set_default_reset_request = hclgevf_set_def_reset_request, .get_channels = hclgevf_get_channels, .get_tqps_and_rss_info = hclgevf_get_tqps_and_rss_info, + .get_regs_len = hclgevf_get_regs_len, + .get_regs = hclgevf_get_regs, .get_status = hclgevf_get_status, .get_ksettings_an_result = hclgevf_get_ksettings_an_result, .get_media_type = hclgevf_get_media_type, + .get_hw_reset_stat = hclgevf_get_hw_reset_stat, + .ae_dev_resetting = hclgevf_ae_dev_resetting, + .ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt, + .set_gro_en = hclgevf_gro_en, + .set_mtu = hclgevf_set_mtu, + .get_global_queue_id = hclgevf_get_qid_global, + .set_timer_task = hclgevf_set_timer_task, }; static struct hnae3_ae_algo ae_algovf = { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h index aed241e8ffab..787bc06944e5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.h @@ -27,15 +27,77 @@ #define HCLGEVF_VECTOR_REG_OFFSET 0x4 #define HCLGEVF_VECTOR_VF_OFFSET 0x100000 +/* bar registers for cmdq */ +#define HCLGEVF_CMDQ_TX_ADDR_L_REG 0x27000 +#define HCLGEVF_CMDQ_TX_ADDR_H_REG 0x27004 +#define HCLGEVF_CMDQ_TX_DEPTH_REG 0x27008 +#define HCLGEVF_CMDQ_TX_TAIL_REG 0x27010 +#define HCLGEVF_CMDQ_TX_HEAD_REG 0x27014 +#define HCLGEVF_CMDQ_RX_ADDR_L_REG 0x27018 +#define HCLGEVF_CMDQ_RX_ADDR_H_REG 0x2701C +#define HCLGEVF_CMDQ_RX_DEPTH_REG 0x27020 +#define 
HCLGEVF_CMDQ_RX_TAIL_REG 0x27024 +#define HCLGEVF_CMDQ_RX_HEAD_REG 0x27028 +#define HCLGEVF_CMDQ_INTR_SRC_REG 0x27100 +#define HCLGEVF_CMDQ_INTR_STS_REG 0x27104 +#define HCLGEVF_CMDQ_INTR_EN_REG 0x27108 +#define HCLGEVF_CMDQ_INTR_GEN_REG 0x2710C + +/* bar registers for common func */ +#define HCLGEVF_GRO_EN_REG 0x28000 + +/* bar registers for rcb */ +#define HCLGEVF_RING_RX_ADDR_L_REG 0x80000 +#define HCLGEVF_RING_RX_ADDR_H_REG 0x80004 +#define HCLGEVF_RING_RX_BD_NUM_REG 0x80008 +#define HCLGEVF_RING_RX_BD_LENGTH_REG 0x8000C +#define HCLGEVF_RING_RX_MERGE_EN_REG 0x80014 +#define HCLGEVF_RING_RX_TAIL_REG 0x80018 +#define HCLGEVF_RING_RX_HEAD_REG 0x8001C +#define HCLGEVF_RING_RX_FBD_NUM_REG 0x80020 +#define HCLGEVF_RING_RX_OFFSET_REG 0x80024 +#define HCLGEVF_RING_RX_FBD_OFFSET_REG 0x80028 +#define HCLGEVF_RING_RX_STASH_REG 0x80030 +#define HCLGEVF_RING_RX_BD_ERR_REG 0x80034 +#define HCLGEVF_RING_TX_ADDR_L_REG 0x80040 +#define HCLGEVF_RING_TX_ADDR_H_REG 0x80044 +#define HCLGEVF_RING_TX_BD_NUM_REG 0x80048 +#define HCLGEVF_RING_TX_PRIORITY_REG 0x8004C +#define HCLGEVF_RING_TX_TC_REG 0x80050 +#define HCLGEVF_RING_TX_MERGE_EN_REG 0x80054 +#define HCLGEVF_RING_TX_TAIL_REG 0x80058 +#define HCLGEVF_RING_TX_HEAD_REG 0x8005C +#define HCLGEVF_RING_TX_FBD_NUM_REG 0x80060 +#define HCLGEVF_RING_TX_OFFSET_REG 0x80064 +#define HCLGEVF_RING_TX_EBD_NUM_REG 0x80068 +#define HCLGEVF_RING_TX_EBD_OFFSET_REG 0x80070 +#define HCLGEVF_RING_TX_BD_ERR_REG 0x80074 +#define HCLGEVF_RING_EN_REG 0x80090 + +/* bar registers for tqp interrupt */ +#define HCLGEVF_TQP_INTR_CTRL_REG 0x20000 +#define HCLGEVF_TQP_INTR_GL0_REG 0x20100 +#define HCLGEVF_TQP_INTR_GL1_REG 0x20200 +#define HCLGEVF_TQP_INTR_GL2_REG 0x20300 +#define HCLGEVF_TQP_INTR_RL_REG 0x20900 + /* Vector0 interrupt CMDQ event source register(RW) */ #define HCLGEVF_VECTOR0_CMDQ_SRC_REG 0x27100 /* CMDQ register bits for RX event(=MBX event) */ #define HCLGEVF_VECTOR0_RX_CMDQ_INT_B 1 +/* RST register bits for RESET event */ +#define HCLGEVF_VECTOR0_RST_INT_B 2 #define HCLGEVF_TQP_RESET_TRY_TIMES 10 /* Reset related Registers */ -#define HCLGEVF_FUN_RST_ING 0x20C00 -#define HCLGEVF_FUN_RST_ING_B 0 +#define HCLGEVF_RST_ING 0x20C00 +#define HCLGEVF_FUN_RST_ING_BIT BIT(0) +#define HCLGEVF_GLOBAL_RST_ING_BIT BIT(5) +#define HCLGEVF_CORE_RST_ING_BIT BIT(6) +#define HCLGEVF_IMP_RST_ING_BIT BIT(7) +#define HCLGEVF_RST_ING_BITS \ + (HCLGEVF_FUN_RST_ING_BIT | HCLGEVF_GLOBAL_RST_ING_BIT | \ + HCLGEVF_CORE_RST_ING_BIT | HCLGEVF_IMP_RST_ING_BIT) #define HCLGEVF_RSS_IND_TBL_SIZE 512 #define HCLGEVF_RSS_SET_BITMAP_MSK 0xffff @@ -54,17 +116,25 @@ #define HCLGEVF_S_IP_BIT BIT(3) #define HCLGEVF_V_TAG_BIT BIT(4) +enum hclgevf_evt_cause { + HCLGEVF_VECTOR0_EVENT_RST, + HCLGEVF_VECTOR0_EVENT_MBX, + HCLGEVF_VECTOR0_EVENT_OTHER, +}; + /* states of hclgevf device & tasks */ enum hclgevf_states { /* device states */ HCLGEVF_STATE_DOWN, HCLGEVF_STATE_DISABLED, + HCLGEVF_STATE_IRQ_INITED, /* task states */ HCLGEVF_STATE_SERVICE_SCHED, HCLGEVF_STATE_RST_SERVICE_SCHED, HCLGEVF_STATE_RST_HANDLING, HCLGEVF_STATE_MBX_SERVICE_SCHED, HCLGEVF_STATE_MBX_HANDLING, + HCLGEVF_STATE_CMD_DISABLE, }; #define HCLGEVF_MPF_ENBALE 1 @@ -145,10 +215,17 @@ struct hclgevf_dev { struct hclgevf_misc_vector misc_vector; struct hclgevf_rss_cfg rss_cfg; unsigned long state; + unsigned long flr_state; + unsigned long default_reset_request; + unsigned long last_reset_time; + enum hnae3_reset_type reset_level; + unsigned long reset_pending; + enum hnae3_reset_type reset_type; #define HCLGEVF_RESET_REQUESTED 0 #define 
HCLGEVF_RESET_PENDING 1 unsigned long reset_state; /* requested, pending */ + unsigned long reset_count; /* the number of reset has been done */ u32 reset_attempts; u32 fw_version; @@ -178,7 +255,9 @@ struct hclgevf_dev { struct hclgevf_mbx_arq_ring arq; /* mailbox async rx queue */ struct timer_list service_timer; + struct timer_list keep_alive_timer; struct work_struct service_task; + struct work_struct keep_alive_task; struct work_struct rst_service_task; struct work_struct mbx_service_task; @@ -192,18 +271,9 @@ struct hclgevf_dev { u32 flag; }; -static inline bool hclgevf_dev_ongoing_reset(struct hclgevf_dev *hdev) -{ - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->nic.reset_level == HNAE3_VF_RESET)); -} - -static inline bool hclgevf_dev_ongoing_full_reset(struct hclgevf_dev *hdev) +static inline bool hclgevf_is_reset_pending(struct hclgevf_dev *hdev) { - return (hdev && - (test_bit(HCLGEVF_STATE_RST_HANDLING, &hdev->state)) && - (hdev->nic.reset_level == HNAE3_VF_FULL_RESET)); + return !!hdev->reset_pending; } int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, u16 code, u16 subcode, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index e9d5a4f96304..84653f58b2d1 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -26,7 +26,7 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, u8 *resp_data, u16 resp_len) { #define HCLGEVF_MAX_TRY_TIMES 500 -#define HCLGEVF_SLEEP_USCOEND 1000 +#define HCLGEVF_SLEEP_USECOND 1000 struct hclgevf_mbx_resp_status *mbx_resp; u16 r_code0, r_code1; int i = 0; @@ -40,7 +40,10 @@ static int hclgevf_get_mbx_resp(struct hclgevf_dev *hdev, u16 code0, u16 code1, } while ((!hdev->mbx_resp.received_resp) && (i < HCLGEVF_MAX_TRY_TIMES)) { - udelay(HCLGEVF_SLEEP_USCOEND); + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) + return -EIO; + + usleep_range(HCLGEVF_SLEEP_USECOND, HCLGEVF_SLEEP_USECOND * 2); i++; } @@ -148,6 +151,11 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) crq = &hdev->hw.cmq.crq; while (!hclgevf_cmd_crq_empty(&hdev->hw)) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, "vf crq need init\n"); + return; + } + desc = &crq->desc[crq->next_to_use]; req = (struct hclge_mbx_pf_to_vf_cmd *)desc->data; @@ -233,6 +241,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) { + enum hnae3_reset_type reset_type; u16 link_status; u16 *msg_q; u8 duplex; @@ -248,6 +257,12 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) /* process all the async queue messages */ while (tail != hdev->arq.head) { + if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) { + dev_info(&hdev->pdev->dev, + "vf crq need init in async\n"); + return; + } + msg_q = hdev->arq.msg_q[hdev->arq.head]; switch (msg_q[0]) { @@ -267,7 +282,8 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev) * has been completely reset. After this stack should * eventually be re-initialized. 
*/ - hdev->nic.reset_level = HNAE3_VF_RESET; + reset_type = le16_to_cpu(msg_q[1]); + set_bit(reset_type, &hdev->reset_pending); set_bit(HCLGEVF_RESET_PENDING, &hdev->reset_state); hclgevf_reset_task_schedule(hdev); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h index 097b5502603f..d1a7d2522d82 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h @@ -50,6 +50,8 @@ enum hinic_port_cmd { HINIC_PORT_CMD_GET_LINK_STATE = 24, + HINIC_PORT_CMD_SET_RX_CSUM = 26, + HINIC_PORT_CMD_SET_PORT_STATE = 41, HINIC_PORT_CMD_FWCTXT_INIT = 69, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c index f92f1bf3901a..1dfa7eb05c10 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c @@ -74,12 +74,6 @@ ((void *)((cmdq_pages)->shadow_page_vaddr) \ + (wq)->block_idx * CMDQ_BLOCK_SIZE) -#define WQE_PAGE_OFF(wq, idx) (((idx) & ((wq)->num_wqebbs_per_page - 1)) * \ - (wq)->wqebb_size) - -#define WQE_PAGE_NUM(wq, idx) (((idx) / ((wq)->num_wqebbs_per_page)) \ - & ((wq)->num_q_pages - 1)) - #define WQ_PAGE_ADDR(wq, idx) \ ((wq)->shadow_block_vaddr[WQE_PAGE_NUM(wq, idx)]) @@ -93,6 +87,17 @@ (((unsigned long)(wqe) - (unsigned long)(wq)->shadow_wqe) \ / (wq)->max_wqe_size) +static inline int WQE_PAGE_OFF(struct hinic_wq *wq, u16 idx) +{ + return (((idx) & ((wq)->num_wqebbs_per_page - 1)) + << (wq)->wqebb_size_shift); +} + +static inline int WQE_PAGE_NUM(struct hinic_wq *wq, u16 idx) +{ + return (((idx) >> ((wq)->wqebbs_per_page_shift)) + & ((wq)->num_q_pages - 1)); +} /** * queue_alloc_page - allocate page for Queue * @hwif: HW interface for allocating DMA @@ -513,10 +518,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, struct hinic_hwif *hwif = wqs->hwif; struct pci_dev *pdev = hwif->pdev; u16 num_wqebbs_per_page; + u16 wqebb_size_shift; int err; - if (wqebb_size == 0) { - dev_err(&pdev->dev, "wqebb_size must be > 0\n"); + if (!is_power_of_2(wqebb_size)) { + dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); return -EINVAL; } @@ -530,9 +536,11 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, return -EINVAL; } - num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + wqebb_size_shift = ilog2(wqebb_size); + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) + >> wqebb_size_shift; - if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + if (!is_power_of_2(num_wqebbs_per_page)) { dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); return -EINVAL; } @@ -550,7 +558,8 @@ int hinic_wq_allocate(struct hinic_wqs *wqs, struct hinic_wq *wq, wq->q_depth = q_depth; wq->max_wqe_size = max_wqe_size; wq->num_wqebbs_per_page = num_wqebbs_per_page; - + wq->wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); + wq->wqebb_size_shift = wqebb_size_shift; wq->block_vaddr = WQ_BASE_VADDR(wqs, wq); wq->shadow_block_vaddr = WQ_BASE_ADDR(wqs, wq); wq->block_paddr = WQ_BASE_PADDR(wqs, wq); @@ -604,11 +613,13 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, u16 q_depth, u16 max_wqe_size) { struct pci_dev *pdev = hwif->pdev; + u16 num_wqebbs_per_page_shift; u16 num_wqebbs_per_page; + u16 wqebb_size_shift; int i, j, err = -ENOMEM; - if (wqebb_size == 0) { - dev_err(&pdev->dev, "wqebb_size must be > 0\n"); + if (!is_power_of_2(wqebb_size)) { + dev_err(&pdev->dev, "wqebb_size must be power of 2\n"); return -EINVAL; } 
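
The hinic work-queue hunks above tighten the validation from "wqebb_size must be > 0" to is_power_of_2(), precisely so the hot-path index math can drop division and modulo in favor of shifts and masks (WQE_PAGE_OFF(), WQE_PAGE_NUM(), and the ALIGN()>>shift conversions). A minimal standalone sketch of that equivalence follows; the sizes and names here are illustrative, not taken from the driver, and the driver additionally wraps the page number with (num_q_pages - 1), which is omitted here.

/* Sketch: power-of-two validation lets idx%N and idx/N become
 * mask and shift, as in the WQE_PAGE_OFF()/WQE_PAGE_NUM() rewrite. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int is_pow2(uint32_t n) { return n && !(n & (n - 1)); }

static uint32_t ilog2_u32(uint32_t n)   /* caller ensures power of two */
{
    uint32_t s = 0;
    while (n >>= 1)
        s++;
    return s;
}

int main(void)
{
    uint32_t wqebb_size = 64;       /* bytes per work-queue element */
    uint32_t wqebbs_per_page = 64;  /* elements per queue page      */
    uint32_t size_shift, page_shift;
    uint16_t idx;

    assert(is_pow2(wqebb_size) && is_pow2(wqebbs_per_page));
    size_shift = ilog2_u32(wqebb_size);
    page_shift = ilog2_u32(wqebbs_per_page);

    for (idx = 0; idx < 300; idx += 77) {
        /* old form: modulo and division */
        uint32_t off_div = (idx % wqebbs_per_page) * wqebb_size;
        uint32_t pg_div  = idx / wqebbs_per_page;
        /* new form: mask and shift */
        uint32_t off_sh  = (idx & (wqebbs_per_page - 1)) << size_shift;
        uint32_t pg_sh   = idx >> page_shift;

        assert(off_div == off_sh && pg_div == pg_sh);
        printf("idx=%3u page=%u offset=%u\n", idx, pg_sh, off_sh);
    }
    return 0;
}

The same reasoning explains why both allocation paths reject a non-power-of-two num_wqebbs_per_page instead of merely warning: the shift form is only correct under that invariant.
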
@@ -622,9 +633,11 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, return -EINVAL; } - num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) / wqebb_size; + wqebb_size_shift = ilog2(wqebb_size); + num_wqebbs_per_page = ALIGN(wq_page_size, wqebb_size) + >> wqebb_size_shift; - if (num_wqebbs_per_page & (num_wqebbs_per_page - 1)) { + if (!is_power_of_2(num_wqebbs_per_page)) { dev_err(&pdev->dev, "num wqebbs per page must be power of 2\n"); return -EINVAL; } @@ -636,6 +649,7 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, dev_err(&pdev->dev, "Failed to allocate CMDQ page\n"); return err; } + num_wqebbs_per_page_shift = ilog2(num_wqebbs_per_page); for (i = 0; i < cmdq_blocks; i++) { wq[i].hwif = hwif; @@ -647,7 +661,8 @@ int hinic_wqs_cmdq_alloc(struct hinic_cmdq_pages *cmdq_pages, wq[i].q_depth = q_depth; wq[i].max_wqe_size = max_wqe_size; wq[i].num_wqebbs_per_page = num_wqebbs_per_page; - + wq[i].wqebbs_per_page_shift = num_wqebbs_per_page_shift; + wq[i].wqebb_size_shift = wqebb_size_shift; wq[i].block_vaddr = CMDQ_BASE_VADDR(cmdq_pages, &wq[i]); wq[i].shadow_block_vaddr = CMDQ_BASE_ADDR(cmdq_pages, &wq[i]); wq[i].block_paddr = CMDQ_BASE_PADDR(cmdq_pages, &wq[i]); @@ -741,7 +756,7 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size, *prod_idx = MASKED_WQE_IDX(wq, atomic_read(&wq->prod_idx)); - num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) >> wq->wqebb_size_shift; if (atomic_sub_return(num_wqebbs, &wq->delta) <= 0) { atomic_add(num_wqebbs, &wq->delta); @@ -795,7 +810,8 @@ void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size) **/ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) { - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) + >> wq->wqebb_size_shift; atomic_add(num_wqebbs, &wq->cons_idx); @@ -813,7 +829,8 @@ void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size) struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size, u16 *cons_idx) { - int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size; + int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) + >> wq->wqebb_size_shift; u16 curr_cons_idx, end_cons_idx; int curr_pg, end_pg; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h index 9b66545ba563..0a936cd6709b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h @@ -39,7 +39,8 @@ struct hinic_wq { u16 q_depth; u16 max_wqe_size; u16 num_wqebbs_per_page; - + u16 wqebbs_per_page_shift; + u16 wqebb_size_shift; /* The addresses are 64 bit in the HW */ u64 block_paddr; void **shadow_block_vaddr; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h index 9754d6ed5f4a..138941527872 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h @@ -170,6 +170,10 @@ #define HINIC_RQ_CQE_STATUS_RXDONE_MASK 0x1 +#define HINIC_RQ_CQE_STATUS_CSUM_ERR_SHIFT 0 + +#define HINIC_RQ_CQE_STATUS_CSUM_ERR_MASK 0xFFFFU + #define HINIC_RQ_CQE_STATUS_GET(val, member) \ (((val) >> HINIC_RQ_CQE_STATUS_##member##_SHIFT) & \ HINIC_RQ_CQE_STATUS_##member##_MASK) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index fdf2bdb6b0d0..6d48dc62a44b 100644 --- 
a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -600,9 +600,6 @@ static int add_mac_addr(struct net_device *netdev, const u8 *addr) u16 vid = 0; int err; - if (!is_valid_ether_addr(addr)) - return -EADDRNOTAVAIL; - netif_info(nic_dev, drv, netdev, "set mac addr = %02x %02x %02x %02x %02x %02x\n", addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); @@ -726,6 +723,7 @@ static void set_rx_mode(struct work_struct *work) { struct hinic_rx_mode_work *rx_mode_work = work_to_rx_mode_work(work); struct hinic_dev *nic_dev = rx_mode_work_to_nic_dev(rx_mode_work); + struct netdev_hw_addr *ha; netif_info(nic_dev, drv, nic_dev->netdev, "set rx mode work\n"); @@ -733,6 +731,9 @@ static void set_rx_mode(struct work_struct *work) __dev_uc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); __dev_mc_sync(nic_dev->netdev, add_mac_addr, remove_mac_addr); + + netdev_for_each_mc_addr(ha, nic_dev->netdev) + add_mac_addr(nic_dev->netdev, ha->addr); } static void hinic_set_rx_mode(struct net_device *netdev) @@ -806,7 +807,8 @@ static const struct net_device_ops hinic_netdev_ops = { static void netdev_features_init(struct net_device *netdev) { netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | - NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6; + NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | + NETIF_F_RXCSUM; netdev->vlan_features = netdev->hw_features; @@ -869,12 +871,16 @@ static int set_features(struct hinic_dev *nic_dev, netdev_features_t features, bool force_change) { netdev_features_t changed = force_change ? ~0 : pre_features ^ features; + u32 csum_en = HINIC_RX_CSUM_OFFLOAD_EN; int err = 0; if (changed & NETIF_F_TSO) err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ? HINIC_TSO_ENABLE : HINIC_TSO_DISABLE); + if (changed & NETIF_F_RXCSUM) + err = hinic_set_rx_csum_offload(nic_dev, csum_en); + return err; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c index 7575a7d3bd9f..122c93597268 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c @@ -409,3 +409,33 @@ int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state) return 0; } + +int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en) +{ + struct hinic_checksum_offload rx_csum_cfg = {0}; + struct hinic_hwdev *hwdev = nic_dev->hwdev; + struct hinic_hwif *hwif; + struct pci_dev *pdev; + u16 out_size; + int err; + + if (!hwdev) + return -EINVAL; + + hwif = hwdev->hwif; + pdev = hwif->pdev; + rx_csum_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif); + rx_csum_cfg.rx_csum_offload = en; + + err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_RX_CSUM, + &rx_csum_cfg, sizeof(rx_csum_cfg), + &rx_csum_cfg, &out_size); + if (err || !out_size || rx_csum_cfg.status) { + dev_err(&pdev->dev, + "Failed to set rx csum offload, ret = %d\n", + rx_csum_cfg.status); + return -EINVAL; + } + + return 0; +} diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h index f6e3220fe28f..02d896eed455 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_port.h +++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h @@ -183,6 +183,15 @@ struct hinic_tso_config { u8 resv2[3]; }; +struct hinic_checksum_offload { + u8 status; + u8 version; + u8 rsvd0[6]; + + u16 func_id; + u16 rsvd1; + u32 rx_csum_offload; +}; int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr, u16 vlan_id); @@ -213,4 +222,5 @@ int 
hinic_port_get_cap(struct hinic_dev *nic_dev, int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state); +int hinic_set_rx_csum_offload(struct hinic_dev *nic_dev, u32 en); #endif diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 4c0f7eda1166..0098b206e7e9 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -43,6 +43,7 @@ #define RX_IRQ_NO_LLI_TIMER 0 #define RX_IRQ_NO_CREDIT 0 #define RX_IRQ_NO_RESEND_TIMER 0 +#define HINIC_RX_BUFFER_WRITE 16 /** * hinic_rxq_clean_stats - Clean the statistics of specific queue @@ -89,6 +90,28 @@ static void rxq_stats_init(struct hinic_rxq *rxq) hinic_rxq_clean_stats(rxq); } +static void rx_csum(struct hinic_rxq *rxq, u16 cons_idx, + struct sk_buff *skb) +{ + struct net_device *netdev = rxq->netdev; + struct hinic_rq_cqe *cqe; + struct hinic_rq *rq; + u32 csum_err; + u32 status; + + rq = rxq->rq; + cqe = rq->cqe[cons_idx]; + status = be32_to_cpu(cqe->status); + csum_err = HINIC_RQ_CQE_STATUS_GET(status, CSUM_ERR); + + if (!(netdev->features & NETIF_F_RXCSUM)) + return; + + if (!csum_err) + skb->ip_summed = CHECKSUM_UNNECESSARY; + else + skb->ip_summed = CHECKSUM_NONE; +} /** * rx_alloc_skb - allocate skb and map it to dma address * @rxq: rx queue @@ -209,7 +232,6 @@ skb_out: hinic_rq_update(rxq->rq, prod_idx); } - tasklet_schedule(&rxq->rx_task); return i; } @@ -237,17 +259,6 @@ static void free_all_rx_skbs(struct hinic_rxq *rxq) } /** - * rx_alloc_task - tasklet for queue allocation - * @data: rx queue - **/ -static void rx_alloc_task(unsigned long data) -{ - struct hinic_rxq *rxq = (struct hinic_rxq *)data; - - (void)rx_alloc_pkts(rxq); -} - -/** * rx_recv_jumbo_pkt - Rx handler for jumbo pkt * @rxq: rx queue * @head_skb: the first skb in the list @@ -311,6 +322,7 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq); u64 pkt_len = 0, rx_bytes = 0; struct hinic_rq_wqe *rq_wqe; + unsigned int free_wqebbs; int num_wqes, pkts = 0; struct hinic_sge sge; struct sk_buff *skb; @@ -328,6 +340,8 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) rx_unmap_skb(rxq, hinic_sge_to_dma(&sge)); + rx_csum(rxq, ci, skb); + prefetch(skb->data); pkt_len = sge.len; @@ -352,8 +366,9 @@ static int rxq_recv(struct hinic_rxq *rxq, int budget) rx_bytes += pkt_len; } - if (pkts) - tasklet_schedule(&rxq->rx_task); /* rx_alloc_pkts */ + free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq); + if (free_wqebbs > HINIC_RX_BUFFER_WRITE) + rx_alloc_pkts(rxq); u64_stats_update_begin(&rxq->rxq_stats.syncp); rxq->rxq_stats.pkts += pkts; @@ -470,8 +485,6 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id); - tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq); - pkts = rx_alloc_pkts(rxq); if (!pkts) { err = -ENOMEM; @@ -488,7 +501,6 @@ int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq, err_req_rx_irq: err_rx_pkts: - tasklet_kill(&rxq->rx_task); free_all_rx_skbs(rxq); devm_kfree(&netdev->dev, rxq->irq_name); return err; @@ -504,7 +516,6 @@ void hinic_clean_rxq(struct hinic_rxq *rxq) rx_free_irq(rxq); - tasklet_kill(&rxq->rx_task); free_all_rx_skbs(rxq); devm_kfree(&netdev->dev, rxq->irq_name); } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.h b/drivers/net/ethernet/huawei/hinic/hinic_rx.h index 27c9af4b1c12..f8ed3fa6c8ee 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.h +++ 
b/drivers/net/ethernet/huawei/hinic/hinic_rx.h @@ -23,6 +23,10 @@ #include "hinic_hw_qp.h" +#define HINIC_RX_CSUM_OFFLOAD_EN 0xFFF +#define HINIC_RX_CSUM_HW_CHECK_NONE BIT(7) +#define HINIC_RX_CSUM_IPSU_OTHER_ERR BIT(8) + struct hinic_rxq_stats { u64 pkts; u64 bytes; @@ -38,8 +42,6 @@ struct hinic_rxq { char *irq_name; - struct tasklet_struct rx_task; - struct napi_struct napi; }; diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 760b2ad8e295..209255495bc9 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2455,7 +2455,8 @@ static void emac_adjust_link(struct net_device *ndev) dev->phy.duplex = phy->duplex; dev->phy.pause = phy->pause; dev->phy.asym_pause = phy->asym_pause; - dev->phy.advertising = phy->advertising; + ethtool_convert_link_mode_to_legacy_u32(&dev->phy.advertising, + phy->advertising); } static int emac_mii_bus_read(struct mii_bus *bus, int addr, int regnum) @@ -2490,7 +2491,8 @@ static int emac_mdio_phy_start_aneg(struct mii_phy *phy, phy_dev->autoneg = phy->autoneg; phy_dev->speed = phy->speed; phy_dev->duplex = phy->duplex; - phy_dev->advertising = phy->advertising; + ethtool_convert_legacy_u32_to_link_mode(phy_dev->advertising, + phy->advertising); return phy_start_aneg(phy_dev); } @@ -2624,7 +2626,8 @@ static int emac_dt_phy_connect(struct emac_instance *dev, dev->phy.def->phy_id_mask = dev->phy_dev->drv->phy_id_mask; dev->phy.def->name = dev->phy_dev->drv->name; dev->phy.def->ops = &emac_dt_mdio_phy_ops; - dev->phy.features = dev->phy_dev->supported; + ethtool_convert_link_mode_to_legacy_u32(&dev->phy.features, + dev->phy_dev->supported); dev->phy.address = dev->phy_dev->mdio.addr; dev->phy.mode = dev->phy_dev->interface; return 0; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 67cc6d9c8fd7..5ecbb1adcf3b 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -773,11 +773,8 @@ static void release_napi(struct ibmvnic_adapter *adapter) return; for (i = 0; i < adapter->num_active_rx_napi; i++) { - if (&adapter->napi[i]) { - netdev_dbg(adapter->netdev, - "Releasing napi[%d]\n", i); - netif_napi_del(&adapter->napi[i]); - } + netdev_dbg(adapter->netdev, "Releasing napi[%d]\n", i); + netif_napi_del(&adapter->napi[i]); } kfree(adapter->napi); diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 59e1bc0f609e..31fb76ee9d82 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -33,7 +33,7 @@ config E100 to identify the adapter. More specific information on configuring the driver is in - <file:Documentation/networking/e100.rst>. + <file:Documentation/networking/device_drivers/intel/e100.rst>. To compile this driver as a module, choose M here. The module will be called e100. @@ -49,7 +49,7 @@ config E1000 <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/e1000.rst>. + <file:Documentation/networking/device_drivers/intel/e1000.rst>. To compile this driver as a module, choose M here. The module will be called e1000. @@ -69,7 +69,7 @@ config E1000E <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/e1000e.rst>. + <file:Documentation/networking/device_drivers/intel/e1000e.rst>. To compile this driver as a module, choose M here. The module will be called e1000e. 
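
The ibm/emac hunks a little earlier adapt to phylib's move from a legacy u32 advertising/supported mask to long link-mode bitmaps, converting at the boundary with ethtool_convert_link_mode_to_legacy_u32() and its inverse. A rough userspace sketch of what such a conversion involves is below; the 96-bit width, helper name, and mask values are assumptions for illustration, not the kernel's definitions — the key point is that only the first 32 mode bits survive the legacy representation.

/* Sketch: converting a long link-mode bitmap down to a legacy u32
 * mask; returns false when modes beyond bit 31 had to be dropped. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NBITS 96                        /* assumed bitmap width */
#define WORDS ((NBITS + 31) / 32)

static bool bitmap_to_legacy_u32(uint32_t *legacy, const uint32_t map[WORDS])
{
    bool fits = true;
    int i;

    *legacy = map[0];                   /* low 32 modes map 1:1 */
    for (i = 1; i < WORDS; i++)
        if (map[i])
            fits = false;               /* information lost */
    return fits;
}

int main(void)
{
    uint32_t map[WORDS] = { 0x000006ef, 0x0, 0x8 };  /* bit 67 set */
    uint32_t legacy;

    if (!bitmap_to_legacy_u32(&legacy, map))
        printf("warning: modes beyond bit 31 were dropped\n");
    printf("legacy advertising mask: 0x%08x\n", legacy);
    return 0;
}

This is why drivers that still store a u32 internally, like emac's struct mii_phy, convert in both directions at every phylib touch point rather than caching the bitmap.
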
@@ -97,7 +97,7 @@ config IGB <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/igb.rst>. + <file:Documentation/networking/device_drivers/intel/igb.rst>. To compile this driver as a module, choose M here. The module will be called igb. @@ -133,7 +133,7 @@ config IGBVF <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/igbvf.rst>. + <file:Documentation/networking/device_drivers/intel/igbvf.rst>. To compile this driver as a module, choose M here. The module will be called igbvf. @@ -150,7 +150,7 @@ config IXGB <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/ixgb.rst>. + <file:Documentation/networking/device_drivers/intel/ixgb.rst>. To compile this driver as a module, choose M here. The module will be called ixgb. @@ -159,6 +159,7 @@ config IXGBE tristate "Intel(R) 10GbE PCI Express adapters support" depends on PCI select MDIO + select MDIO_DEVICE imply PTP_1588_CLOCK ---help--- This driver supports Intel(R) 10GbE PCI Express family of @@ -168,7 +169,7 @@ config IXGBE <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/ixgbe.rst>. + <file:Documentation/networking/device_drivers/intel/ixgbe.rst>. To compile this driver as a module, choose M here. The module will be called ixgbe. @@ -220,7 +221,7 @@ config IXGBEVF <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/ixgbevf.rst>. + <file:Documentation/networking/device_drivers/intel/ixgbevf.rst>. To compile this driver as a module, choose M here. The module will be called ixgbevf. MSI-X interrupt support is required @@ -247,7 +248,7 @@ config I40E <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/i40e.rst>. + <file:Documentation/networking/device_drivers/intel/i40e.rst>. To compile this driver as a module, choose M here. The module will be called i40e. @@ -282,7 +283,7 @@ config I40EVF This driver was formerly named i40evf. More specific information on configuring the driver is in - <file:Documentation/networking/iavf.rst>. + <file:Documentation/networking/device_drivers/intel/iavf.rst>. To compile this driver as a module, choose M here. The module will be called iavf. MSI-X interrupt support is required @@ -300,7 +301,7 @@ config ICE <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/ice.rst>. + <file:Documentation/networking/device_drivers/intel/ice.rst>. To compile this driver as a module, choose M here. The module will be called ice. @@ -318,7 +319,7 @@ config FM10K <http://support.intel.com> More specific information on configuring the driver is in - <file:Documentation/networking/fm10k.rst>. + <file:Documentation/networking/device_drivers/intel/fm10k.rst>. To compile this driver as a module, choose M here. The module will be called fm10k. 
MSI-X interrupt support is required diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 7c4b55482f72..0fd268070fb4 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1345,8 +1345,8 @@ static inline int e100_load_ucode_wait(struct nic *nic) fw = e100_request_firmware(nic); /* If it's NULL, then no ucode is required */ - if (!fw || IS_ERR(fw)) - return PTR_ERR(fw); + if (IS_ERR_OR_NULL(fw)) + return PTR_ERR_OR_ZERO(fw); if ((err = e100_exec_cb(nic, (void *)fw, e100_setup_ucode))) netif_err(nic, probe, nic->netdev, @@ -2225,11 +2225,13 @@ static int e100_poll(struct napi_struct *napi, int budget) e100_rx_clean(nic, &work_done, budget); e100_tx_clean(nic); - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { - napi_complete_done(napi, work_done); + /* If budget fully consumed, continue polling */ + if (work_done == budget) + return budget; + + /* only re-enable interrupt if stack agrees polling is really done */ + if (likely(napi_complete_done(napi, work_done))) e100_enable_irq(nic); - } return work_done; } diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c index 43b6d3cec3b3..8fe9af0e2ab7 100644 --- a/drivers/net/ethernet/intel/e1000/e1000_main.c +++ b/drivers/net/ethernet/intel/e1000/e1000_main.c @@ -3803,14 +3803,15 @@ static int e1000_clean(struct napi_struct *napi, int budget) adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget); - if (!tx_clean_complete) - work_done = budget; + if (!tx_clean_complete || work_done == budget) + return budget; - /* If budget not fully consumed, exit the polling mode */ - if (work_done < budget) { + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { if (likely(adapter->itr_setting & 3)) e1000_set_itr(adapter); - napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->flags)) e1000_irq_enable(adapter); } diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index c760dc72c520..be13227f1697 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -505,6 +505,9 @@ extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); void e1000e_ptp_remove(struct e1000_adapter *adapter); +u64 e1000e_read_systim(struct e1000_adapter *adapter, + struct ptp_system_timestamp *sts); + static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw) { return hw->phy.ops.reset(hw); diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 16a73bd9f4cb..308c006cb41d 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -2651,9 +2651,9 @@ err: /** * e1000e_poll - NAPI Rx polling callback * @napi: struct associated with this polling callback - * @weight: number of packets driver is allowed to process this poll + * @budget: number of packets driver is allowed to process this poll **/ -static int e1000e_poll(struct napi_struct *napi, int weight) +static int e1000e_poll(struct napi_struct *napi, int budget) { struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi); @@ -2667,16 +2667,17 @@ static int e1000e_poll(struct napi_struct *napi, int weight) (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val)) tx_cleaned = 
e1000_clean_tx_irq(adapter->tx_ring); - adapter->clean_rx(adapter->rx_ring, &work_done, weight); + adapter->clean_rx(adapter->rx_ring, &work_done, budget); - if (!tx_cleaned) - work_done = weight; + if (!tx_cleaned || work_done == budget) + return budget; - /* If weight not fully consumed, exit the polling mode */ - if (work_done < weight) { + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { if (adapter->itr_setting & 3) e1000_set_itr(adapter); - napi_complete_done(napi, work_done); if (!test_bit(__E1000_DOWN, &adapter->state)) { if (adapter->msix_entries) ew32(IMS, adapter->rx_ring->ims_val); @@ -4319,13 +4320,16 @@ void e1000e_reinit_locked(struct e1000_adapter *adapter) /** * e1000e_sanitize_systim - sanitize raw cycle counter reads * @hw: pointer to the HW structure - * @systim: time value read, sanitized and returned + * @systim: PHC time value read, sanitized and returned + * @sts: structure to hold system time before and after reading SYSTIML, + * may be NULL * * Errata for 82574/82583 possible bad bits read from SYSTIMH/L: * check to see that the time is incrementing at a reasonable * rate and is a multiple of incvalue. **/ -static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) +static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim, + struct ptp_system_timestamp *sts) { u64 time_delta, rem, temp; u64 systim_next; @@ -4335,7 +4339,9 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK; for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) { /* latch SYSTIMH on read of SYSTIML */ + ptp_read_system_prets(sts); systim_next = (u64)er32(SYSTIML); + ptp_read_system_postts(sts); systim_next |= (u64)er32(SYSTIMH) << 32; time_delta = systim_next - systim; @@ -4353,15 +4359,16 @@ static u64 e1000e_sanitize_systim(struct e1000_hw *hw, u64 systim) } /** - * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) - * @cc: cyclecounter structure + * e1000e_read_systim - read SYSTIM register + * @adapter: board private structure + * @sts: structure which will contain system time before and after reading + * SYSTIML, may be NULL **/ -static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) +u64 e1000e_read_systim(struct e1000_adapter *adapter, + struct ptp_system_timestamp *sts) { - struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, - cc); struct e1000_hw *hw = &adapter->hw; - u32 systimel, systimeh; + u32 systimel, systimel_2, systimeh; u64 systim; /* SYSTIMH latching upon SYSTIML read does not work well. * This means that if SYSTIML overflows after we read it but before @@ -4369,11 +4376,15 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) * will experience a huge non linear increment in the systime value * to fix that we test for overflow and if true, we re-read systime. */ + ptp_read_system_prets(sts); systimel = er32(SYSTIML); + ptp_read_system_postts(sts); systimeh = er32(SYSTIMH); /* Is systimel is so large that overflow is possible? 
*/ if (systimel >= (u32)0xffffffff - E1000_TIMINCA_INCVALUE_MASK) { - u32 systimel_2 = er32(SYSTIML); + ptp_read_system_prets(sts); + systimel_2 = er32(SYSTIML); + ptp_read_system_postts(sts); if (systimel > systimel_2) { /* There was an overflow, read again SYSTIMH, and use * systimel_2 @@ -4386,12 +4397,24 @@ static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) systim |= (u64)systimeh << 32; if (adapter->flags2 & FLAG2_CHECK_SYSTIM_OVERFLOW) - systim = e1000e_sanitize_systim(hw, systim); + systim = e1000e_sanitize_systim(hw, systim, sts); return systim; } /** + * e1000e_cyclecounter_read - read raw cycle counter (used by time counter) + * @cc: cyclecounter structure + **/ +static u64 e1000e_cyclecounter_read(const struct cyclecounter *cc) +{ + struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter, + cc); + + return e1000e_read_systim(adapter, NULL); +} + +/** * e1000_sw_init - Initialize general software structures (struct e1000_adapter) * @adapter: board private structure to initialize * diff --git a/drivers/net/ethernet/intel/e1000e/ptp.c b/drivers/net/ethernet/intel/e1000e/ptp.c index 37c76945ad9b..1a4c65d9feb4 100644 --- a/drivers/net/ethernet/intel/e1000e/ptp.c +++ b/drivers/net/ethernet/intel/e1000e/ptp.c @@ -161,22 +161,30 @@ static int e1000e_phc_getcrosststamp(struct ptp_clock_info *ptp, #endif/*CONFIG_E1000E_HWTS*/ /** - * e1000e_phc_gettime - Reads the current time from the hardware clock + * e1000e_phc_gettimex - Reads the current time from the hardware clock and + * system clock * @ptp: ptp clock structure - * @ts: timespec structure to hold the current time value + * @ts: timespec structure to hold the current PHC time + * @sts: structure to hold the current system time * * Read the timecounter and return the correct value in ns after converting * it into a struct timespec. 
**/ -static int e1000e_phc_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int e1000e_phc_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter, ptp_clock_info); unsigned long flags; - u64 ns; + u64 cycles, ns; spin_lock_irqsave(&adapter->systim_lock, flags); - ns = timecounter_read(&adapter->tc); + + /* NOTE: Non-monotonic SYSTIM readings may be returned */ + cycles = e1000e_read_systim(adapter, sts); + ns = timecounter_cyc2time(&adapter->tc, cycles); + spin_unlock_irqrestore(&adapter->systim_lock, flags); *ts = ns_to_timespec64(ns); @@ -232,9 +240,12 @@ static void e1000e_systim_overflow_work(struct work_struct *work) systim_overflow_work.work); struct e1000_hw *hw = &adapter->hw; struct timespec64 ts; + u64 ns; - adapter->ptp_clock_info.gettime64(&adapter->ptp_clock_info, &ts); + /* Update the timecounter */ + ns = timecounter_read(&adapter->tc); + ts = ns_to_timespec64(ns); e_dbg("SYSTIM overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); @@ -251,7 +262,7 @@ static const struct ptp_clock_info e1000e_ptp_clock_info = { .pps = 0, .adjfreq = e1000e_phc_adjfreq, .adjtime = e1000e_phc_adjtime, - .gettime64 = e1000e_phc_gettime, + .gettimex64 = e1000e_phc_gettimex, .settime64 = e1000e_phc_settime, .enable = e1000e_phc_enable, }; diff --git a/drivers/net/ethernet/intel/fm10k/fm10k_main.c b/drivers/net/ethernet/intel/fm10k/fm10k_main.c index 5b2a50e5798f..6fd15a734324 100644 --- a/drivers/net/ethernet/intel/fm10k/fm10k_main.c +++ b/drivers/net/ethernet/intel/fm10k/fm10k_main.c @@ -1465,11 +1465,11 @@ static int fm10k_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - /* all work done, exit the polling mode */ - napi_complete_done(napi, work_done); - - /* re-enable the q_vector */ - fm10k_qv_enable(q_vector); + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + fm10k_qv_enable(q_vector); return min(work_done, budget - 1); } diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 876cac317e79..8de9085bba9e 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -122,6 +122,7 @@ enum i40e_state_t { __I40E_MDD_EVENT_PENDING, __I40E_VFLR_EVENT_PENDING, __I40E_RESET_RECOVERY_PENDING, + __I40E_TIMEOUT_RECOVERY_PENDING, __I40E_MISC_IRQ_REQUESTED, __I40E_RESET_INTR_RECEIVED, __I40E_REINIT_REQUESTED, @@ -146,6 +147,7 @@ enum i40e_state_t { __I40E_CLIENT_SERVICE_REQUESTED, __I40E_CLIENT_L2_CHANGE, __I40E_CLIENT_RESET, + __I40E_VIRTCHNL_OP_PENDING, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, }; @@ -494,7 +496,6 @@ struct i40e_pf { #define I40E_HW_STOP_FW_LLDP BIT(16) #define I40E_HW_PORT_ID_VALID BIT(17) #define I40E_HW_RESTART_AUTONEG BIT(18) -#define I40E_HW_STOPPABLE_FW_LLDP BIT(19) u32 flags; #define I40E_FLAG_RX_CSUM_ENABLED BIT(0) diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq.c b/drivers/net/ethernet/intel/i40e/i40e_adminq.c index 501ee718177f..7ab61f6ebb5f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq.c +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq.c @@ -588,6 +588,12 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw) hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) { hw->flags |= 
I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE; + hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; + } + if (hw->mac.type == I40E_MAC_X722 && + hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && + hw->aq.api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722) { + hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE; } /* Newer versions of firmware require lock when reading the NVM */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h index 80e3eec6134e..11506102471c 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h +++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h @@ -11,7 +11,7 @@ */ #define I40E_FW_API_VERSION_MAJOR 0x0001 -#define I40E_FW_API_VERSION_MINOR_X722 0x0005 +#define I40E_FW_API_VERSION_MINOR_X722 0x0006 #define I40E_FW_API_VERSION_MINOR_X710 0x0007 #define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \ @@ -20,6 +20,8 @@ /* API version 1.7 implements additional link and PHY-specific APIs */ #define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007 +/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */ +#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006 struct i40e_aq_desc { __le16 flags; diff --git a/drivers/net/ethernet/intel/i40e/i40e_common.c b/drivers/net/ethernet/intel/i40e/i40e_common.c index 85f75b5978fc..97a9b1fb4763 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_common.c +++ b/drivers/net/ethernet/intel/i40e/i40e_common.c @@ -3723,6 +3723,9 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable, (struct i40e_aqc_set_dcb_parameters *)&desc.params.raw; i40e_status status; + if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) + return I40E_ERR_DEVICE_NOT_SUPPORTED; + i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_dcb_parameters); diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 9f8464f80783..a6bc7847346b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -906,6 +906,7 @@ static void i40e_get_settings_link_up(struct i40e_hw *hw, ks->base.speed = SPEED_100; break; default: + ks->base.speed = SPEED_UNKNOWN; break; } ks->base.duplex = DUPLEX_FULL; @@ -1335,6 +1336,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, i40e_status status; u8 aq_failures; int err = 0; + u32 is_an; /* Changing the port's flow control is not supported if this isn't the * port's controlling PF @@ -1347,15 +1349,14 @@ static int i40e_set_pauseparam(struct net_device *netdev, if (vsi != pf->vsi[pf->lan_vsi]) return -EOPNOTSUPP; - if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ? 
- AUTONEG_ENABLE : AUTONEG_DISABLE)) { + is_an = hw_link_info->an_info & I40E_AQ_AN_COMPLETED; + if (pause->autoneg != is_an) { netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n"); return -EOPNOTSUPP; } /* If we have link and don't have autoneg */ - if (!test_bit(__I40E_DOWN, pf->state) && - !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) { + if (!test_bit(__I40E_DOWN, pf->state) && !is_an) { /* Send message that it might not necessarily work */ netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n"); } @@ -1406,7 +1407,7 @@ static int i40e_set_pauseparam(struct net_device *netdev, err = -EAGAIN; } - if (!test_bit(__I40E_DOWN, pf->state)) { + if (!test_bit(__I40E_DOWN, pf->state) && is_an) { /* Give it a little more time to try to come back */ msleep(75); if (!test_bit(__I40E_DOWN, pf->state)) @@ -2377,7 +2378,8 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) return -EOPNOTSUPP; /* only magic packet is supported */ - if (wol->wolopts && (wol->wolopts != WAKE_MAGIC)) + if (wol->wolopts && (wol->wolopts != WAKE_MAGIC) && + (wol->wolopts != WAKE_FILTER)) return -EOPNOTSUPP; /* is this a new value? */ @@ -4659,14 +4661,15 @@ flags_complete: return -EOPNOTSUPP; /* If the driver detected FW LLDP was disabled on init, this flag could - * be set, however we do not support _changing_ the flag if NPAR is - * enabled or FW API version < 1.7. There are situations where older - * FW versions/NPAR enabled PFs could disable LLDP, however we _must_ - * not allow the user to enable/disable LLDP with this flag on - * unsupported FW versions. + * be set, however we do not support _changing_ the flag: + * - on XL710 if NPAR is enabled or FW API version < 1.7 + * - on X722 with FW API version < 1.6 + * There are situations where older FW versions/NPAR enabled PFs could + * disable LLDP, however we _must_ not allow the user to enable/disable + * LLDP with this flag on unsupported FW versions. */ if (changed_flags & I40E_FLAG_DISABLE_FW_LLDP) { - if (!(pf->hw_features & I40E_HW_STOPPABLE_FW_LLDP)) { + if (!(pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) { dev_warn(&pf->pdev->dev, "Device does not support changing FW LLDP\n"); return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 0e5dc74b4ef2..4d40878e395a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -26,8 +26,8 @@ static const char i40e_driver_string[] = #define DRV_KERN "-k" #define DRV_VERSION_MAJOR 2 -#define DRV_VERSION_MINOR 3 -#define DRV_VERSION_BUILD 2 +#define DRV_VERSION_MINOR 7 +#define DRV_VERSION_BUILD 6 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ __stringify(DRV_VERSION_MINOR) "." 
\ __stringify(DRV_VERSION_BUILD) DRV_KERN @@ -338,6 +338,10 @@ static void i40e_tx_timeout(struct net_device *netdev) (pf->tx_timeout_last_recovery + netdev->watchdog_timeo))) return; /* don't do any new action before the next timeout */ + /* don't kick off another recovery if one is already pending */ + if (test_and_set_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state)) + return; + if (tx_ring) { head = i40e_get_head(tx_ring); /* Read interrupt register */ @@ -1493,8 +1497,7 @@ int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr) bool found = false; int bkt; - WARN(!spin_is_locked(&vsi->mac_filter_hash_lock), - "Missing mac_filter_hash_lock\n"); + lockdep_assert_held(&vsi->mac_filter_hash_lock); hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist) { if (ether_addr_equal(macaddr, f->macaddr)) { __i40e_del_filter(vsi, f); @@ -9632,6 +9635,7 @@ end_core_reset: clear_bit(__I40E_RESET_FAILED, pf->state); clear_recovery: clear_bit(__I40E_RESET_RECOVERY_PENDING, pf->state); + clear_bit(__I40E_TIMEOUT_RECOVERY_PENDING, pf->state); } /** @@ -11332,16 +11336,15 @@ static int i40e_sw_init(struct i40e_pf *pf) /* IWARP needs one extra vector for CQP just like MISC.*/ pf->num_iwarp_msix = (int)num_online_cpus() + 1; } - /* Stopping the FW LLDP engine is only supported on the - * XL710 with a FW ver >= 1.7. Also, stopping FW LLDP - * engine is not supported if NPAR is functioning on this - * part + /* Stopping FW LLDP engine is supported on XL710 and X722 + * starting from FW versions determined in i40e_init_adminq. + * Stopping the FW LLDP engine is not supported on XL710 + * if NPAR is functioning so unset this hw flag in this case. */ if (pf->hw.mac.type == I40E_MAC_XL710 && - !pf->hw.func_caps.npar_enable && - (pf->hw.aq.api_maj_ver > 1 || - (pf->hw.aq.api_maj_ver == 1 && pf->hw.aq.api_min_ver > 6))) - pf->hw_features |= I40E_HW_STOPPABLE_FW_LLDP; + pf->hw.func_caps.npar_enable && + (pf->hw.flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE)) + pf->hw.flags &= ~I40E_HW_FLAG_FW_LLDP_STOPPABLE; #ifdef CONFIG_PCI_IOV if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) { @@ -11682,6 +11685,7 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], * @dev: the netdev being configured * @nlh: RTNL message * @flags: bridge flags + * @extack: netlink extended ack * * Inserts a new hardware bridge if not already created and * enables the bridging mode requested (VEB or VEPA). 
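The i40e_tx_timeout() hunk above leans on a standard kernel idiom: test_and_set_bit() atomically claims a state bit and reports whether it was already set, so at most one caller ever kicks off recovery, and the matching clear_bit() in the clear_recovery hunk releases the claim once the reset completes. A minimal sketch of the idiom, using hypothetical names rather than the driver's actual state machine:

#include <linux/bitops.h>
#include <linux/types.h>

enum { MY_RECOVERY_PENDING, MY_STATE_NBITS };

static DECLARE_BITMAP(my_state, MY_STATE_NBITS);

static void my_tx_timeout(void)
{
	/* test_and_set_bit() returns the previous bit value: nonzero
	 * means another recovery is already in flight, so back off.
	 */
	if (test_and_set_bit(MY_RECOVERY_PENDING, my_state))
		return;

	/* ... schedule the actual reset work here ... */
}

static void my_reset_done(void)
{
	/* Release the guard so the next timeout may start a recovery. */
	clear_bit(MY_RECOVERY_PENDING, my_state);
}

Because the check and the claim are one atomic operation, two watchdog firings cannot both pass the guard.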
If the @@ -11694,7 +11698,8 @@ static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], **/ static int i40e_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 flags) + u16 flags, + struct netlink_ext_ack *extack) { struct i40e_netdev_priv *np = netdev_priv(dev); struct i40e_vsi *vsi = np->vsi; @@ -12334,6 +12339,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi) ether_addr_copy(netdev->dev_addr, mac_addr); ether_addr_copy(netdev->perm_addr, mac_addr); + /* i40iw_net_event() reads 16 bytes from neigh->primary_key */ + netdev->neigh_priv_len = sizeof(u32) * 4; + netdev->priv_flags |= IFF_UNICAST_FLT; netdev->priv_flags |= IFF_SUPP_NOFCS; /* Setup netdev TC information */ @@ -14302,23 +14310,23 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) switch (hw->bus.speed) { case i40e_bus_speed_8000: - strncpy(speed, "8.0", PCI_SPEED_SIZE); break; + strlcpy(speed, "8.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_5000: - strncpy(speed, "5.0", PCI_SPEED_SIZE); break; + strlcpy(speed, "5.0", PCI_SPEED_SIZE); break; case i40e_bus_speed_2500: - strncpy(speed, "2.5", PCI_SPEED_SIZE); break; + strlcpy(speed, "2.5", PCI_SPEED_SIZE); break; default: break; } switch (hw->bus.width) { case i40e_bus_width_pcie_x8: - strncpy(width, "8", PCI_WIDTH_SIZE); break; + strlcpy(width, "8", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x4: - strncpy(width, "4", PCI_WIDTH_SIZE); break; + strlcpy(width, "4", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x2: - strncpy(width, "2", PCI_WIDTH_SIZE); break; + strlcpy(width, "2", PCI_WIDTH_SIZE); break; case i40e_bus_width_pcie_x1: - strncpy(width, "1", PCI_WIDTH_SIZE); break; + strlcpy(width, "1", PCI_WIDTH_SIZE); break; default: break; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c index 1199f0502d6d..5fb4353c742b 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c @@ -28,19 +28,23 @@ * i40e_ptp_read - Read the PHC time from the device * @pf: Board private structure * @ts: timespec structure to hold the current time value + * @sts: structure to hold the system time before and after reading the PHC * * This function reads the PRTTSYN_TIME registers and stores them in a * timespec. However, since the registers are 64 bits of nanoseconds, we must * convert the result to a timespec before we can return. **/ -static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts) +static void i40e_ptp_read(struct i40e_pf *pf, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct i40e_hw *hw = &pf->hw; u32 hi, lo; u64 ns; /* The timer latches on the lowest register read. 
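The e1000e conversion earlier and the i40e_ptp hunks around this point both implement the new gettimex64 contract: snapshot the system clock immediately before and after the device read that latches the PHC time, so callers can bound the uncertainty of the reading. A condensed sketch of that shape; the register offsets and the MMIO read helper are hypothetical stand-ins for rd32()/er32():

#include <linux/ptp_clock_kernel.h>

#define MY_TIME_L 0x00 /* hypothetical latching low-word register */
#define MY_TIME_H 0x04 /* hypothetical high-word register */

extern u32 my_rd32(u32 reg); /* hypothetical MMIO read */

static int my_phc_gettimex(struct ptp_clock_info *ptp,
			   struct timespec64 *ts,
			   struct ptp_system_timestamp *sts)
{
	u32 lo, hi;

	ptp_read_system_prets(sts);	/* system time before ... */
	lo = my_rd32(MY_TIME_L);	/* ... the read that latches */
	ptp_read_system_postts(sts);	/* system time after */
	hi = my_rd32(MY_TIME_H);

	*ts = ns_to_timespec64(((u64)hi << 32) | lo);
	return 0;
}

Only the low-word read sits between the two system timestamps, because that is the access that actually samples the hardware clock.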
*/ + ptp_read_system_prets(sts); lo = rd32(hw, I40E_PRTTSYN_TIME_L); + ptp_read_system_postts(sts); hi = rd32(hw, I40E_PRTTSYN_TIME_H); ns = (((u64)hi) << 32) | lo; @@ -146,7 +150,7 @@ static int i40e_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) mutex_lock(&pf->tmreg_lock); - i40e_ptp_read(pf, &now); + i40e_ptp_read(pf, &now, NULL); timespec64_add_ns(&now, delta); i40e_ptp_write(pf, (const struct timespec64 *)&now); @@ -156,19 +160,21 @@ } /** - * i40e_ptp_gettime - Get the time of the PHC + * i40e_ptp_gettimex - Get the time of the PHC * @ptp: The PTP clock structure * @ts: timespec structure to hold the current time value + * @sts: structure to hold the system time before and after reading the PHC * * Read the device clock and return the correct value in ns, after converting it * into a timespec struct. **/ -static int i40e_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int i40e_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct i40e_pf *pf = container_of(ptp, struct i40e_pf, ptp_caps); mutex_lock(&pf->tmreg_lock); - i40e_ptp_read(pf, ts); + i40e_ptp_read(pf, ts, sts); mutex_unlock(&pf->tmreg_lock); return 0; @@ -694,7 +700,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf) if (!IS_ERR_OR_NULL(pf->ptp_clock)) return 0; - strncpy(pf->ptp_caps.name, i40e_driver_name, + strlcpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name) - 1); pf->ptp_caps.owner = THIS_MODULE; pf->ptp_caps.max_adj = 999999999; @@ -702,7 +708,7 @@ pf->ptp_caps.pps = 0; pf->ptp_caps.adjfreq = i40e_ptp_adjfreq; pf->ptp_caps.adjtime = i40e_ptp_adjtime; - pf->ptp_caps.gettime64 = i40e_ptp_gettime; + pf->ptp_caps.gettimex64 = i40e_ptp_gettimex; pf->ptp_caps.settime64 = i40e_ptp_settime; pf->ptp_caps.enable = i40e_ptp_feature_enable; diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index d0a95424ce58..a7e14e98889f 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -2648,10 +2648,11 @@ tx_only: if (vsi->back->flags & I40E_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; - /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete_done(napi, work_done); - - i40e_update_enable_itr(vsi, q_vector); + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + i40e_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } @@ -3454,6 +3455,8 @@ static inline int i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there 
* @@ -3507,6 +3510,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, u16 i = xdp_ring->next_to_use; struct i40e_tx_buffer *tx_bi; struct i40e_tx_desc *tx_desc; + void *data = xdpf->data; u32 size = xdpf->len; dma_addr_t dma; @@ -3514,8 +3518,7 @@ static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf, xdp_ring->tx_stats.tx_busy++; return I40E_XDP_CONSUMED; } - - dma = dma_map_single(xdp_ring->dev, xdpf->data, size, DMA_TO_DEVICE); + dma = dma_map_single(xdp_ring->dev, data, size, DMA_TO_DEVICE); if (dma_mapping_error(xdp_ring->dev, dma)) return I40E_XDP_CONSUMED; @@ -3633,8 +3636,6 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, if (tsyn) tx_flags |= I40E_TX_FLAGS_TSYN; - skb_tx_timestamp(skb); - /* always enable CRC insertion offload */ td_cmd |= I40E_TX_DESC_CMD_ICRC; diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 7df969c59855..2781ab91ca82 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -615,6 +615,7 @@ struct i40e_hw { #define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1) #define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2) #define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3) +#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4) u64 flags; /* Used in set switch config AQ command */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index ac5698ed0b11..2ac23ebfbf31 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1112,7 +1112,8 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, if (!i40e_vc_isvalid_vsi_id(vf, vsi_id) || !vsi) return I40E_ERR_PARAM; - if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) { + if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps) && + (allmulti || alluni)) { dev_err(&pf->pdev->dev, "Unprivileged VF %d is attempting to configure promiscuous mode\n", vf->vf_id); @@ -1675,13 +1676,20 @@ err_out: int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) { struct i40e_pf *pf = pci_get_drvdata(pdev); + int ret = 0; + + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } if (num_vfs) { if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) { pf->flags |= I40E_FLAG_VEB_MODE_ENABLED; i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } - return i40e_pci_sriov_enable(pdev, num_vfs); + ret = i40e_pci_sriov_enable(pdev, num_vfs); + goto sriov_configure_out; } if (!pci_vfs_assigned(pf->pdev)) { @@ -1690,9 +1698,12 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs) i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG); } else { dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n"); - return -EINVAL; + ret = -EINVAL; + goto sriov_configure_out; } - return 0; +sriov_configure_out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); + return ret; } /***********************virtual channel routines******************/ @@ -3893,6 +3904,11 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) goto error_param; } + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + if (is_multicast_ether_addr(mac)) { dev_err(&pf->pdev->dev, "Invalid Ethernet address %pM for VF %d\n", mac, vf_id); @@ -3941,6 
+3957,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac) dev_info(&pf->pdev->dev, "Bring down and up the VF interface to make this change effective.\n"); error_param: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -3992,6 +4009,11 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) @@ -4107,6 +4129,7 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, int vf_id, ret = 0; error_pvid: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4128,6 +4151,11 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) @@ -4154,6 +4182,7 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate, vf->tx_rate = max_tx_rate; error: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4174,6 +4203,11 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ ret = i40e_validate_vf(pf, vf_id); if (ret) @@ -4209,6 +4243,7 @@ int i40e_ndo_get_vf_config(struct net_device *netdev, ret = 0; error_param: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4230,6 +4265,11 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) int abs_vf_id; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); @@ -4273,6 +4313,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link) 0, (u8 *)&pfe, sizeof(pfe), NULL); error_out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4294,6 +4335,11 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); @@ -4327,6 +4373,7 @@ int i40e_ndo_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool enable) ret = -EIO; } out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } @@ -4345,15 +4392,22 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) struct i40e_vf *vf; int ret = 0; + if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) { + dev_warn(&pf->pdev->dev, "Unable to configure VFs, other operation is pending.\n"); + return -EAGAIN; + } + /* validate the request */ if (vf_id >= pf->num_alloc_vfs) { 
dev_err(&pf->pdev->dev, "Invalid VF Identifier %d\n", vf_id); - return -EINVAL; + ret = -EINVAL; + goto out; } if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Trusted VF not supported in MFP mode.\n"); - return -EINVAL; + ret = -EINVAL; + goto out; } vf = &pf->vf[vf_id]; @@ -4376,5 +4430,6 @@ int i40e_ndo_set_vf_trust(struct net_device *netdev, int vf_id, bool setting) } out: + clear_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state); return ret; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index bf67d62e2b5f..f9621026beef 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -13,9 +13,9 @@ #define I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED 3 #define I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED 10 -#define I40E_VLAN_PRIORITY_SHIFT 12 +#define I40E_VLAN_PRIORITY_SHIFT 13 #define I40E_VLAN_MASK 0xFFF -#define I40E_PRIORITY_MASK 0x7000 +#define I40E_PRIORITY_MASK 0xE000 /* Various queue ctrls */ enum i40e_queue_ctrl { diff --git a/drivers/net/ethernet/intel/iavf/iavf_txrx.c b/drivers/net/ethernet/intel/iavf/iavf_txrx.c index fb9bfad96daf..9b4d7cec2e18 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_txrx.c +++ b/drivers/net/ethernet/intel/iavf/iavf_txrx.c @@ -1761,10 +1761,11 @@ tx_only: if (vsi->back->flags & IAVF_TXR_FLAGS_WB_ON_ITR) q_vector->arm_wb_state = false; - /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete_done(napi, work_done); - - iavf_update_enable_itr(vsi, q_vector); + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + iavf_update_enable_itr(vsi, q_vector); return min(work_done, budget - 1); } @@ -2343,6 +2344,8 @@ static inline void iavf_tx_map(struct iavf_ring *tx_ring, struct sk_buff *skb, tx_desc->cmd_type_offset_bsz = build_ctob(td_cmd, td_offset, size, td_tag); + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. 
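This iavf poll change is the same conversion applied to fm10k_poll() and i40e_napi_poll() earlier in the series: napi_complete_done() returns false while a busy-polling socket owns the NAPI context, and only a true return may re-arm the interrupt. Roughly, with hypothetical ring-cleaning and IRQ helpers:

#include <linux/netdevice.h>

extern int my_clean_rings(struct napi_struct *napi, int budget);	/* hypothetical */
extern void my_reenable_irq(struct napi_struct *napi);			/* hypothetical */

static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_clean_rings(napi, budget);

	if (work_done >= budget)
		return budget;	/* not done yet: stay in polling mode */

	/* napi_complete_done() returns false under busy-polling; re-arming
	 * the IRQ in that case would let interrupt cleanup race with the
	 * busy-poll loop on the same ring.
	 */
	if (likely(napi_complete_done(napi, work_done)))
		my_reenable_irq(napi);

	return min(work_done, budget - 1);
}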
* @@ -2461,8 +2464,6 @@ static netdev_tx_t iavf_xmit_frame_ring(struct sk_buff *skb, if (tso < 0) goto out_drop; - skb_tx_timestamp(skb); - /* always enable CRC insertion offload */ td_cmd |= IAVF_TX_DESC_CMD_ICRC; diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b8548370f1c7..a385575600f6 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -52,7 +52,6 @@ extern const char ice_drv_ver[]; #define ICE_MBXQ_LEN 64 #define ICE_MIN_MSIX 2 #define ICE_NO_VSI 0xffff -#define ICE_MAX_VSI_ALLOC 130 #define ICE_MAX_TXQS 2048 #define ICE_MAX_RXQS 2048 #define ICE_VSI_MAP_CONTIG 0 @@ -97,14 +96,14 @@ extern const char ice_drv_ver[]; #define ice_for_each_vsi(pf, i) \ for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++) -/* Macros for each tx/rx ring in a VSI */ +/* Macros for each Tx/Rx ring in a VSI */ #define ice_for_each_txq(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_txq; (i)++) #define ice_for_each_rxq(vsi, i) \ for ((i) = 0; (i) < (vsi)->num_rxq; (i)++) -/* Macros for each allocated tx/rx ring whether used or not in a VSI */ +/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */ #define ice_for_each_alloc_txq(vsi, i) \ for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++) @@ -113,7 +112,9 @@ extern const char ice_drv_ver[]; struct ice_tc_info { u16 qoffset; - u16 qcount; + u16 qcount_tx; + u16 qcount_rx; + u8 netdev_tc; }; struct ice_tc_cfg { @@ -149,10 +150,10 @@ enum ice_state { __ICE_RESET_FAILED, /* set by reset/rebuild */ /* When checking for the PF to be in a nominal operating state, the * bits that are grouped at the beginning of the list need to be - * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will - * be checked. If you need to add a bit into consideration for nominal + * checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will + * be checked. If you need to add a bit into consideration for nominal * operating state, it must be added before - * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position + * __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position * without appropriate consideration. */ __ICE_STATE_NOMINAL_CHECK_BITS, @@ -182,8 +183,8 @@ struct ice_vsi { struct ice_sw *vsw; /* switch this VSI is on */ struct ice_pf *back; /* back pointer to PF */ struct ice_port_info *port_info; /* back pointer to port_info */ - struct ice_ring **rx_rings; /* rx ring array */ - struct ice_ring **tx_rings; /* tx ring array */ + struct ice_ring **rx_rings; /* Rx ring array */ + struct ice_ring **tx_rings; /* Tx ring array */ struct ice_q_vector **q_vectors; /* q_vector array */ irqreturn_t (*irq_handler)(int irq, void *data); @@ -200,8 +201,8 @@ struct ice_vsi { int sw_base_vector; /* Irq base for OS reserved vectors */ int hw_base_vector; /* HW (absolute) index of a vector */ enum ice_vsi_type type; - u16 vsi_num; /* HW (absolute) index of this VSI */ - u16 idx; /* software index in pf->vsi[] */ + u16 vsi_num; /* HW (absolute) index of this VSI */ + u16 idx; /* software index in pf->vsi[] */ /* Interrupt thresholds */ u16 work_lmt; @@ -254,8 +255,8 @@ struct ice_q_vector { struct ice_ring_container tx; struct irq_affinity_notify affinity_notify; u16 v_idx; /* index in the vsi->q_vector array. 
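Splitting ice_tc_info into qcount_tx, qcount_rx, and a netdev_tc index (above) matches what the core expects when a driver advertises traffic classes: each TC is a (count, offset) slice of the device's Tx queue range. A sketch of how fields like these are typically published through netdev_set_num_tc() and netdev_set_tc_queue(); the struct is a hypothetical mirror, not the driver's exact flow:

#include <linux/netdevice.h>

struct my_tc_info {	/* hypothetical mirror of the reworked ice_tc_info */
	u16 qoffset;
	u16 qcount_tx;
	u8 netdev_tc;
};

static int my_publish_tcs(struct net_device *dev,
			  const struct my_tc_info *tc, u8 num_tc)
{
	int err = netdev_set_num_tc(dev, num_tc);
	u8 i;

	if (err)
		return err;

	/* Advertise each TC as a (count, offset) slice of the Tx queues. */
	for (i = 0; i < num_tc; i++)
		netdev_set_tc_queue(dev, tc[i].netdev_tc,
				    tc[i].qcount_tx, tc[i].qoffset);
	return 0;
}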
*/ - u8 num_ring_tx; /* total number of tx rings in vector */ - u8 num_ring_rx; /* total number of rx rings in vector */ + u8 num_ring_tx; /* total number of Tx rings in vector */ + u8 num_ring_rx; /* total number of Rx rings in vector */ char name[ICE_INT_NAME_STR_LEN]; /* in usecs, need to use ice_intrl_to_usecs_reg() before writing this * value to the device @@ -307,10 +308,10 @@ struct ice_pf { u32 hw_oicr_idx; /* Other interrupt cause vector HW index */ u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */ u32 num_lan_msix; /* Total MSIX vectors for base driver */ - u16 num_lan_tx; /* num lan tx queues setup */ - u16 num_lan_rx; /* num lan rx queues setup */ - u16 q_left_tx; /* remaining num tx queues left unclaimed */ - u16 q_left_rx; /* remaining num rx queues left unclaimed */ + u16 num_lan_tx; /* num lan Tx queues setup */ + u16 num_lan_rx; /* num lan Rx queues setup */ + u16 q_left_tx; /* remaining num Tx queues left unclaimed */ + u16 q_left_rx; /* remaining num Rx queues left unclaimed */ u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */ u16 num_alloc_vsi; u16 corer_count; /* Core reset count */ diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h index 6653555f55dd..fcdcd80b18e7 100644 --- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h +++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h @@ -5,7 +5,7 @@ #define _ICE_ADMINQ_CMD_H_ /* This header file defines the Admin Queue commands, error codes and - * descriptor format. It is shared between Firmware and Software. + * descriptor format. It is shared between Firmware and Software. */ #define ICE_MAX_VSI 768 @@ -87,6 +87,7 @@ struct ice_aqc_list_caps { /* Device/Function buffer entry, repeated per reported capability */ struct ice_aqc_list_caps_elem { __le16 cap; +#define ICE_AQC_CAPS_VALID_FUNCTIONS 0x0005 #define ICE_AQC_CAPS_SRIOV 0x0012 #define ICE_AQC_CAPS_VF 0x0013 #define ICE_AQC_CAPS_VSI 0x0017 @@ -462,7 +463,7 @@ struct ice_aqc_sw_rules { }; /* Add/Update/Get/Remove lookup Rx/Tx command/response entry - * This structure describes the lookup rules and associated actions. "index" + * is returned as part of a response to a successful Add command, and can be * used to identify the rule for Update/Get/Remove commands. 
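The new ICE_AQC_CAPS_VALID_FUNCTIONS ID above is consumed the way every ice_aqc_list_caps_elem record is: the response buffer is a flat array of elements, and the parser switches on the little-endian cap ID (see ice_parse_caps in the ice_common.c hunk below). A stripped-down sketch of that walk, with a hypothetical element type carrying only the two fields used here:

#include <linux/types.h>
#include <asm/byteorder.h>

#define MY_CAPS_VALID_FUNCTIONS 0x0005

struct my_caps_elem {	/* hypothetical subset of the AQ element */
	__le16 cap;
	__le32 number;
} __packed;

static u8 my_parse_valid_functions(const struct my_caps_elem *elem,
				   u32 cap_count)
{
	u8 valid_functions = 0;
	u32 i;

	for (i = 0; i < cap_count; i++, elem++) {
		switch (le16_to_cpu(elem->cap)) {
		case MY_CAPS_VALID_FUNCTIONS:
			/* low byte carries the per-PF valid bitmap */
			valid_functions = le32_to_cpu(elem->number) & 0xFF;
			break;
		default:
			break;	/* unknown capability IDs are skipped */
		}
	}
	return valid_functions;
}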
*/ @@ -1065,10 +1066,10 @@ struct ice_aqc_nvm { #define ICE_AQC_NVM_LAST_CMD BIT(0) #define ICE_AQC_NVM_PCIR_REQ BIT(0) /* Used by NVM Update reply */ #define ICE_AQC_NVM_PRESERVATION_S 1 -#define ICE_AQC_NVM_PRESERVATION_M (3 << CSR_AQ_NVM_PRESERVATION_S) -#define ICE_AQC_NVM_NO_PRESERVATION (0 << CSR_AQ_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVATION_M (3 << ICE_AQC_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_NO_PRESERVATION (0 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_PRESERVE_ALL BIT(1) -#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << CSR_AQ_NVM_PRESERVATION_S) +#define ICE_AQC_NVM_PRESERVE_SELECTED (3 << ICE_AQC_NVM_PRESERVATION_S) #define ICE_AQC_NVM_FLASH_ONLY BIT(7) __le16 module_typeid; __le16 length; @@ -1110,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys { }; /* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */ -struct ice_aqc_get_set_rss_lut { +struct ice_aqc_get_set_rss_lut { #define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15) #define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0 #define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S) @@ -1314,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log { * @params: command-specific parameters * * Descriptor format for commands the driver posts on the Admin Transmit Queue - * (ATQ). The firmware writes back onto the command descriptor and returns - * the result of the command. Asynchronous events that are not an immediate + * (ATQ). The firmware writes back onto the command descriptor and returns + * the result of the command. Asynchronous events that are not an immediate * result of the command are written to the Admin Receive Queue (ARQ) using - * the same descriptor format. Descriptors are in little-endian notation with + * the same descriptor format. Descriptors are in little-endian notation with * 32-bit words. 
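The CSR_AQ_* to ICE_AQC_* rename above is more than a style fix: no CSR_AQ_NVM_PRESERVATION_S macro ever existed, so code expanding the old defines could not have compiled. With the corrected names, the preservation policy is a two-bit field at bits 2:1 of cmd_flags (mask 0x6). A small worked sketch, with hypothetical names mirroring the corrected defines:

#include <linux/bits.h>
#include <linux/types.h>

#define MY_NVM_PRESERVATION_S		1
#define MY_NVM_PRESERVATION_M		(3 << MY_NVM_PRESERVATION_S)	/* 0x6 */
#define MY_NVM_NO_PRESERVATION		(0 << MY_NVM_PRESERVATION_S)	/* 0x0 */
#define MY_NVM_PRESERVE_ALL		BIT(1)				/* 0x2 */
#define MY_NVM_PRESERVE_SELECTED	(3 << MY_NVM_PRESERVATION_S)	/* 0x6 */

static u8 my_set_preservation(u8 cmd_flags, u8 policy)
{
	cmd_flags &= ~MY_NVM_PRESERVATION_M;	/* clear the 2-bit field */
	return cmd_flags | (policy & MY_NVM_PRESERVATION_M);
}

For example, my_set_preservation(0, MY_NVM_PRESERVE_SELECTED) yields 0x6, while flag bits outside the field, such as the BIT(7) flash-only flag, are left untouched.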
*/ struct ice_aq_desc { @@ -1379,10 +1380,10 @@ struct ice_aq_desc { /* error codes */ enum ice_aq_err { - ICE_AQ_RC_OK = 0, /* success */ + ICE_AQ_RC_OK = 0, /* Success */ ICE_AQ_RC_ENOMEM = 9, /* Out of memory */ ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */ - ICE_AQ_RC_EEXIST = 13, /* object already exists */ + ICE_AQ_RC_EEXIST = 13, /* Object already exists */ ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */ }; diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index 554fd707a6d6..4c1d35da940d 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw) INIT_LIST_HEAD(&sw->vsi_list_map_head); - ice_init_def_sw_recp(hw); - - return 0; + return ice_init_def_sw_recp(hw); } /** @@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw) hw->evb_veb = true; - /* Query the allocated resources for tx scheduler */ + /* Query the allocated resources for Tx scheduler */ status = ice_sched_query_res_alloc(hw); if (status) { ice_debug(hw, ICE_DBG_SCHED, @@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req) * ice_copy_rxq_ctx_to_hw * @hw: pointer to the hardware structure * @ice_rxq_ctx: pointer to the rxq context - * @rxq_index: the index of the rx queue + * @rxq_index: the index of the Rx queue * * Copies rxq context from dense structure to hw register space */ @@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = { * ice_write_rxq_ctx * @hw: pointer to the hardware structure * @rlan_ctx: pointer to the rxq context - * @rxq_index: the index of the rx queue + * @rxq_index: the index of the Rx queue * * Converts rxq context from sparse to dense structure and then writes * it to hw register space @@ -1387,6 +1385,27 @@ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) } /** + * ice_get_guar_num_vsi - determine number of guar VSI for a PF + * @hw: pointer to the hw structure + * + * Determine the number of valid functions by going through the bitmap returned + * from parsing capabilities and use this to calculate the number of VSI per PF. 
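ice_get_guar_num_vsi() just below sizes each PF's guaranteed VSI allotment by dividing the device-wide pool (ICE_MAX_VSI, 768 per the header above) across however many PFs the valid-functions bitmap reports, instead of trusting the per-function number from firmware. The arithmetic as a standalone sketch:

#include <linux/bitops.h>
#include <linux/types.h>

#define MY_MAX_VSI 768	/* mirrors ICE_MAX_VSI */

static u32 my_guar_num_vsi(u32 valid_functions)
{
	/* hweight8() counts the set bits in the low byte. */
	u8 funcs = hweight8(valid_functions & 0xFF);

	return funcs ? MY_MAX_VSI / funcs : 0;
}

With two valid PFs (bitmap 0x03) each one is guaranteed 768 / 2 = 384 VSIs; with all eight bits set, 768 / 8 = 96.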
+ */ +static u32 ice_get_guar_num_vsi(struct ice_hw *hw) +{ + u8 funcs; + +#define ICE_CAPS_VALID_FUNCS_M 0xFF + funcs = hweight8(hw->dev_caps.common_cap.valid_functions & + ICE_CAPS_VALID_FUNCS_M); + + if (!funcs) + return 0; + + return ICE_MAX_VSI / funcs; +} + +/** * ice_parse_caps - parse function/device capabilities * @hw: pointer to the hw struct * @buf: pointer to a buffer containing function/device capability records @@ -1428,6 +1447,12 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, u16 cap = le16_to_cpu(cap_resp->cap); switch (cap) { + case ICE_AQC_CAPS_VALID_FUNCTIONS: + caps->valid_functions = number; + ice_debug(hw, ICE_DBG_INIT, + "HW caps: Valid Functions = %d\n", + caps->valid_functions); + break; case ICE_AQC_CAPS_SRIOV: caps->sr_iov_1_1 = (number == 1); ice_debug(hw, ICE_DBG_INIT, @@ -1457,10 +1482,10 @@ ice_parse_caps(struct ice_hw *hw, void *buf, u32 cap_count, "HW caps: Dev.VSI cnt = %d\n", dev_p->num_vsi_allocd_to_host); } else if (func_p) { - func_p->guaranteed_num_vsi = number; + func_p->guar_num_vsi = ice_get_guar_num_vsi(hw); ice_debug(hw, ICE_DBG_INIT, "HW caps: Func.VSI cnt = %d\n", - func_p->guaranteed_num_vsi); + number); } break; case ICE_AQC_CAPS_RSS: @@ -1688,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw) * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned */ -static u16 -ice_get_link_speed_based_on_phy_type(u64 phy_type_low) +static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low) { u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN; diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 84c967294eaf..2bf5e11f559a 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -3,6 +3,26 @@ #include "ice_common.h" +#define ICE_CQ_INIT_REGS(qinfo, prefix) \ +do { \ + (qinfo)->sq.head = prefix##_ATQH; \ + (qinfo)->sq.tail = prefix##_ATQT; \ + (qinfo)->sq.len = prefix##_ATQLEN; \ + (qinfo)->sq.bah = prefix##_ATQBAH; \ + (qinfo)->sq.bal = prefix##_ATQBAL; \ + (qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M; \ + (qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M; \ + (qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M; \ + (qinfo)->rq.head = prefix##_ARQH; \ + (qinfo)->rq.tail = prefix##_ARQT; \ + (qinfo)->rq.len = prefix##_ARQLEN; \ + (qinfo)->rq.bah = prefix##_ARQBAH; \ + (qinfo)->rq.bal = prefix##_ARQBAL; \ + (qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M; \ + (qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M; \ + (qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M; \ +} while (0) + /** * ice_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -13,23 +33,7 @@ static void ice_adminq_init_regs(struct ice_hw *hw) { struct ice_ctl_q_info *cq = &hw->adminq; - cq->sq.head = PF_FW_ATQH; - cq->sq.tail = PF_FW_ATQT; - cq->sq.len = PF_FW_ATQLEN; - cq->sq.bah = PF_FW_ATQBAH; - cq->sq.bal = PF_FW_ATQBAL; - cq->sq.len_mask = PF_FW_ATQLEN_ATQLEN_M; - cq->sq.len_ena_mask = PF_FW_ATQLEN_ATQENABLE_M; - cq->sq.head_mask = PF_FW_ATQH_ATQH_M; - - cq->rq.head = PF_FW_ARQH; - cq->rq.tail = PF_FW_ARQT; - cq->rq.len = PF_FW_ARQLEN; - cq->rq.bah = PF_FW_ARQBAH; - cq->rq.bal = PF_FW_ARQBAL; - cq->rq.len_mask = PF_FW_ARQLEN_ARQLEN_M; - cq->rq.len_ena_mask = PF_FW_ARQLEN_ARQENABLE_M; - cq->rq.head_mask = PF_FW_ARQH_ARQH_M; + ICE_CQ_INIT_REGS(cq, PF_FW); } /** @@ -42,24 +46,7 @@ static void ice_mailbox_init_regs(struct ice_hw *hw) 
{ struct ice_ctl_q_info *cq = &hw->mailboxq; - /* set head and tail registers in our local struct */ - cq->sq.head = PF_MBX_ATQH; - cq->sq.tail = PF_MBX_ATQT; - cq->sq.len = PF_MBX_ATQLEN; - cq->sq.bah = PF_MBX_ATQBAH; - cq->sq.bal = PF_MBX_ATQBAL; - cq->sq.len_mask = PF_MBX_ATQLEN_ATQLEN_M; - cq->sq.len_ena_mask = PF_MBX_ATQLEN_ATQENABLE_M; - cq->sq.head_mask = PF_MBX_ATQH_ATQH_M; - - cq->rq.head = PF_MBX_ARQH; - cq->rq.tail = PF_MBX_ARQT; - cq->rq.len = PF_MBX_ARQLEN; - cq->rq.bah = PF_MBX_ARQBAH; - cq->rq.bal = PF_MBX_ARQBAL; - cq->rq.len_mask = PF_MBX_ARQLEN_ARQLEN_M; - cq->rq.len_ena_mask = PF_MBX_ARQLEN_ARQENABLE_M; - cq->rq.head_mask = PF_MBX_ARQH_ARQH_M; + ICE_CQ_INIT_REGS(cq, PF_MBX); } /** @@ -131,37 +118,20 @@ ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) } /** - * ice_free_ctrlq_sq_ring - Free Control Transmit Queue (ATQ) rings + * ice_free_cq_ring - Free control queue ring * @hw: pointer to the hardware structure - * @cq: pointer to the specific Control queue + * @ring: pointer to the specific control queue ring * - * This assumes the posted send buffers have already been cleaned + * This assumes the posted buffers have already been cleaned * and de-allocated */ -static void ice_free_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring) { - dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size, - cq->sq.desc_buf.va, cq->sq.desc_buf.pa); - cq->sq.desc_buf.va = NULL; - cq->sq.desc_buf.pa = 0; - cq->sq.desc_buf.size = 0; -} - -/** - * ice_free_ctrlq_rq_ring - Free Control Receive Queue (ARQ) rings - * @hw: pointer to the hardware structure - * @cq: pointer to the specific Control queue - * - * This assumes the posted receive buffers have already been cleaned - * and de-allocated - */ -static void ice_free_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq) -{ - dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.desc_buf.size, - cq->rq.desc_buf.va, cq->rq.desc_buf.pa); - cq->rq.desc_buf.va = NULL; - cq->rq.desc_buf.pa = 0; - cq->rq.desc_buf.size = 0; + dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size, + ring->desc_buf.va, ring->desc_buf.pa); + ring->desc_buf.va = NULL; + ring->desc_buf.pa = 0; + ring->desc_buf.size = 0; } /** @@ -280,54 +250,23 @@ unwind_alloc_sq_bufs: return ICE_ERR_NO_MEMORY; } -/** - * ice_free_rq_bufs - Free ARQ buffer info elements - * @hw: pointer to the hardware structure - * @cq: pointer to the specific Control queue - */ -static void ice_free_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) -{ - int i; - - /* free descriptors */ - for (i = 0; i < cq->num_rq_entries; i++) { - dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size, - cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa); - cq->rq.r.rq_bi[i].va = NULL; - cq->rq.r.rq_bi[i].pa = 0; - cq->rq.r.rq_bi[i].size = 0; - } - - /* free the dma header */ - devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head); -} - -/** - * ice_free_sq_bufs - Free ATQ buffer info elements - * @hw: pointer to the hardware structure - * @cq: pointer to the specific Control queue - */ -static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) +static enum ice_status +ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries) { - int i; + /* Clear Head and Tail */ + wr32(hw, ring->head, 0); + wr32(hw, ring->tail, 0); - /* only unmap if the address is non-NULL */ - for (i = 0; i < cq->num_sq_entries; i++) - if (cq->sq.r.sq_bi[i].pa) { - 
dmam_free_coherent(ice_hw_to_dev(hw), - cq->sq.r.sq_bi[i].size, - cq->sq.r.sq_bi[i].va, - cq->sq.r.sq_bi[i].pa); - cq->sq.r.sq_bi[i].va = NULL; - cq->sq.r.sq_bi[i].pa = 0; - cq->sq.r.sq_bi[i].size = 0; - } + /* set starting point */ + wr32(hw, ring->len, (num_entries | ring->len_ena_mask)); + wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa)); + wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa)); - /* free the buffer info list */ - devm_kfree(ice_hw_to_dev(hw), cq->sq.cmd_buf); + /* Check one register to verify that config was applied */ + if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa)) + return ICE_ERR_AQ_ERROR; - /* free the dma header */ - devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head); + return 0; } /** @@ -340,23 +279,7 @@ static void ice_free_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq) static enum ice_status ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - u32 reg = 0; - - /* Clear Head and Tail */ - wr32(hw, cq->sq.head, 0); - wr32(hw, cq->sq.tail, 0); - - /* set starting point */ - wr32(hw, cq->sq.len, (cq->num_sq_entries | cq->sq.len_ena_mask)); - wr32(hw, cq->sq.bal, lower_32_bits(cq->sq.desc_buf.pa)); - wr32(hw, cq->sq.bah, upper_32_bits(cq->sq.desc_buf.pa)); - - /* Check one register to verify that config was applied */ - reg = rd32(hw, cq->sq.bal); - if (reg != lower_32_bits(cq->sq.desc_buf.pa)) - return ICE_ERR_AQ_ERROR; - - return 0; + return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries); } /** @@ -369,25 +292,15 @@ ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) static enum ice_status ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq) { - u32 reg = 0; - - /* Clear Head and Tail */ - wr32(hw, cq->rq.head, 0); - wr32(hw, cq->rq.tail, 0); + enum ice_status status; - /* set starting point */ - wr32(hw, cq->rq.len, (cq->num_rq_entries | cq->rq.len_ena_mask)); - wr32(hw, cq->rq.bal, lower_32_bits(cq->rq.desc_buf.pa)); - wr32(hw, cq->rq.bah, upper_32_bits(cq->rq.desc_buf.pa)); + status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries); + if (status) + return status; /* Update tail in the HW to post pre-allocated buffers */ wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1)); - /* Check one register to verify that config was applied */ - reg = rd32(hw, cq->rq.bal); - if (reg != lower_32_bits(cq->rq.desc_buf.pa)) - return ICE_ERR_AQ_ERROR; - return 0; } @@ -444,7 +357,7 @@ static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: - ice_free_ctrlq_sq_ring(hw, cq); + ice_free_cq_ring(hw, &cq->sq); init_ctrlq_exit: return ret_code; @@ -503,12 +416,33 @@ static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) goto init_ctrlq_exit; init_ctrlq_free_rings: - ice_free_ctrlq_rq_ring(hw, cq); + ice_free_cq_ring(hw, &cq->rq); init_ctrlq_exit: return ret_code; } +#define ICE_FREE_CQ_BUFS(hw, qi, ring) \ +do { \ + int i; \ + /* free descriptors */ \ + for (i = 0; i < (qi)->num_##ring##_entries; i++) \ + if ((qi)->ring.r.ring##_bi[i].pa) { \ + dmam_free_coherent(ice_hw_to_dev(hw), \ + (qi)->ring.r.ring##_bi[i].size,\ + (qi)->ring.r.ring##_bi[i].va,\ + (qi)->ring.r.ring##_bi[i].pa);\ + (qi)->ring.r.ring##_bi[i].va = NULL; \ + (qi)->ring.r.ring##_bi[i].pa = 0; \ + (qi)->ring.r.ring##_bi[i].size = 0; \ + } \ + /* free the buffer info list */ \ + if ((qi)->ring.cmd_buf) \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ + /* free dma head */ \ + devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ +} while (0) + /** * 
ice_shutdown_sq - shutdown the Control ATQ * @hw: pointer to the hardware structure @@ -538,8 +472,8 @@ ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->sq.count = 0; /* to indicate uninitialized queue */ /* free ring buffers and the ring itself */ - ice_free_sq_bufs(hw, cq); - ice_free_ctrlq_sq_ring(hw, cq); + ICE_FREE_CQ_BUFS(hw, cq, sq); + ice_free_cq_ring(hw, &cq->sq); shutdown_sq_out: mutex_unlock(&cq->sq_lock); @@ -606,8 +540,8 @@ ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq) cq->rq.count = 0; /* free ring buffers and the ring itself */ - ice_free_rq_bufs(hw, cq); - ice_free_ctrlq_rq_ring(hw, cq); + ICE_FREE_CQ_BUFS(hw, cq, rq); + ice_free_cq_ring(hw, &cq->rq); shutdown_rq_out: mutex_unlock(&cq->rq_lock); @@ -657,7 +591,6 @@ init_ctrlq_free_rq: * - cq->num_rq_entries * - cq->rq_buf_size * - cq->sq_buf_size - * */ static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) { @@ -841,7 +774,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) * @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @cd: pointer to command details structure * - * This is the main send command routine for the ATQ. It runs the q, + * This is the main send command routine for the ATQ. It runs the queue, * cleans the queue, etc. */ enum ice_status @@ -1035,7 +968,7 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode) * @pending: number of events that could be left to process * * This function cleans one Admin Receive Queue element and returns - * the contents through e. It can also return how many events are + * the contents through e. It can also return how many events are * left to process through 'pending'. */ enum ice_status diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c index 648acdb4c644..3b6e387f5440 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c @@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = { * The PF_STATs are appended to the netdev stats only when ethtool -S * is queried on the base PF netdev. */ -static struct ice_stats ice_gstrings_pf_stats[] = { +static const struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes), ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes), ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast), @@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = { ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults), }; -static u32 ice_regs_dump_list[] = { +static const u32 ice_regs_dump_list[] = { PFGEN_STATE, PRTGEN_STATUS, QRX_CTRL(0), @@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset) * a private ethtool flag). This is due to the nature of the * ethtool stats API. * - * User space programs such as ethtool must make 3 separate + * Userspace programs such as ethtool must make 3 separate * ioctl requests, one for size, one for the strings, and * finally one for the stats. Since these cross into - * user space, changes to the number or size could result in + * userspace, changes to the number or size could result in * undefined memory access or incorrect string<->value * correlations for statistics. 
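The comment above is also why this hunk const-ifies ice_gstrings_pf_stats: get_sset_count(), get_strings(), and get_ethtool_stats() arrive as three independent ioctls, so only a single immutable table keeps the count, the names, and the values in agreement. A skeletal sketch around a hypothetical stat table (the values callback, omitted here, would walk the same array):

#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/string.h>

struct my_stat {	/* hypothetical stat descriptor */
	char name[ETH_GSTRING_LEN];
};

static const struct my_stat my_stats[] = {
	{ "tx_bytes" }, { "rx_bytes" },
};

static int my_get_sset_count(struct net_device *dev, int sset)
{
	return sset == ETH_SS_STATS ? ARRAY_SIZE(my_stats) : -EOPNOTSUPP;
}

static void my_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	u32 i;

	if (sset != ETH_SS_STATS)
		return;
	/* Same table, same order, same count as my_get_sset_count(). */
	for (i = 0; i < ARRAY_SIZE(my_stats); i++)
		memcpy(data + i * ETH_GSTRING_LEN, my_stats[i].name,
		       ETH_GSTRING_LEN);
}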
* @@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev) { /* restart autonegotiation */ struct ice_netdev_priv *np = netdev_priv(netdev); - struct ice_link_status *hw_link_info; struct ice_vsi *vsi = np->vsi; struct ice_port_info *pi; enum ice_status status; - bool link_up; pi = vsi->port_info; - hw_link_info = &pi->phy.link_info; - link_up = hw_link_info->link_info & ICE_AQ_LINK_UP; + /* If VSI state is up, then restart autoneg with link up */ + if (!test_bit(__ICE_DOWN, vsi->back->state)) + status = ice_aq_set_link_restart_an(pi, true, NULL); + else + status = ice_aq_set_link_restart_an(pi, false, NULL); - status = ice_aq_set_link_restart_an(pi, link_up, NULL); if (status) { netdev_info(netdev, "link restart failed, err %d aq_err %d\n", status, pi->hw->adminq.sq_last_status); @@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) /** * ice_set_pauseparam - Set Flow Control parameter * @netdev: network interface device structure - * @pause: return tx/rx flow control status + * @pause: return Tx/Rx flow control status */ static int ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause) @@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev) } /** - * ice_get_rxfh_indir_size - get the rx flow hash indirection table size + * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size * @netdev: network interface device structure * * Returns the table size. @@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev) } /** - * ice_get_rxfh - get the rx flow hash indirection table + * ice_get_rxfh - get the Rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table * @key: hash key @@ -1603,7 +1603,7 @@ out: } /** - * ice_set_rxfh - set the rx flow hash indirection table + * ice_set_rxfh - set the Rx flow hash indirection table * @netdev: network interface device structure * @indir: indirection table * @key: hash key diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h index 596b9fb1c510..5507928c8fbe 100644 --- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h +++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h @@ -7,6 +7,9 @@ #define _ICE_HW_AUTOGEN_H_ #define QTX_COMM_DBELL(_DBQM) (0x002C0000 + ((_DBQM) * 4)) +#define QTX_COMM_HEAD(_DBQM) (0x000E0000 + ((_DBQM) * 4)) +#define QTX_COMM_HEAD_HEAD_S 0 +#define QTX_COMM_HEAD_HEAD_M ICE_M(0x1FFF, 0) #define PF_FW_ARQBAH 0x00080180 #define PF_FW_ARQBAL 0x00080080 #define PF_FW_ARQH 0x00080380 diff --git a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h index 7d2a66739e3f..bb51dd7defb5 100644 --- a/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h +++ b/drivers/net/ethernet/intel/ice/ice_lan_tx_rx.h @@ -6,11 +6,11 @@ union ice_32byte_rx_desc { struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ + __le64 pkt_addr; /* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ /* bit 0 of hdr_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; + __le64 rsvd1; + __le64 rsvd2; } read; struct { struct { @@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer { */ union ice_32b_rx_flex_desc { struct { - __le64 pkt_addr; /* Packet buffer address */ - __le64 hdr_addr; /* Header buffer address */ - /* bit 0 of hdr_addr is DD bit */ - __le64 rsvd1; - __le64 rsvd2; + __le64 pkt_addr; 
/* Packet buffer address */ + __le64 hdr_addr; /* Header buffer address */ + /* bit 0 of hdr_addr is DD bit */ + __le64 rsvd1; + __le64 rsvd2; } read; struct { /* Qword 0 */ @@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits { #define ICE_RXQ_CTX_SIZE_DWORDS 8 #define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32)) +#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22 +#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5 +#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800)) /* RLAN Rx queue context data * @@ -274,18 +277,18 @@ struct ice_rlan_ctx { u16 dbuf; /* bigger than needed, see above for reason */ #define ICE_RLAN_CTX_HBUF_S 6 u16 hbuf; /* bigger than needed, see above for reason */ - u8 dtype; - u8 dsize; - u8 crcstrip; - u8 l2tsel; - u8 hsplit_0; - u8 hsplit_1; - u8 showiv; + u8 dtype; + u8 dsize; + u8 crcstrip; + u8 l2tsel; + u8 hsplit_0; + u8 hsplit_1; + u8 showiv; u32 rxmax; /* bigger than needed, see above for reason */ - u8 tphrdesc_ena; - u8 tphwdesc_ena; - u8 tphdata_ena; - u8 tphhead_ena; + u8 tphrdesc_ena; + u8 tphwdesc_ena; + u8 tphdata_ena; + u8 tphhead_ena; u16 lrxqthresh; /* bigger than needed, see above for reason */ }; @@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits { struct ice_tlan_ctx { #define ICE_TLAN_CTX_BASE_S 7 u64 base; /* base is defined in 128-byte units */ - u8 port_num; + u8 port_num; u16 cgd_num; /* bigger than needed, see above for reason */ - u8 pf_num; + u8 pf_num; u16 vmvf_num; - u8 vmvf_type; + u8 vmvf_type; #define ICE_TLAN_CTX_VMVF_TYPE_VF 0 #define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1 #define ICE_TLAN_CTX_VMVF_TYPE_PF 2 u16 src_vsi; - u8 tsyn_ena; - u8 alt_vlan; + u8 tsyn_ena; + u8 alt_vlan; u16 cpuid; /* bigger than needed, see above for reason */ - u8 wb_mode; - u8 tphrd_desc; - u8 tphrd; - u8 tphwr_desc; + u8 wb_mode; + u8 tphrd_desc; + u8 tphrd; + u8 tphwr_desc; u16 cmpq_id; u16 qnum_in_func; - u8 itr_notification_mode; - u8 adjust_prof_id; + u8 itr_notification_mode; + u8 adjust_prof_id; u32 qlen; /* bigger than needed, see above for reason */ - u8 quanta_prof_idx; - u8 tso_ena; + u8 quanta_prof_idx; + u8 tso_ena; u16 tso_qnum; - u8 legacy_int; - u8 drop_ena; - u8 cache_prof_idx; - u8 pkt_shaper_prof_idx; - u8 int_q_state; /* width not needed - internal do not write */ + u8 legacy_int; + u8 drop_ena; + u8 cache_prof_idx; + u8 pkt_shaper_prof_idx; + u8 int_q_state; /* width not needed - internal do not write */ }; /* macro to make the table lines short */ diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 1041fa2a7767..29b1dcfd4331 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring) u16 pf_q; int err; - /* what is RX queue number in global space of 2K Rx queues */ + /* what is Rx queue number in global space of 2K Rx queues */ pf_q = vsi->rxq_map[ring->q_index]; /* clear the context structure first */ @@ -174,15 +174,15 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena) { int i; - for (i = 0; i < ICE_Q_WAIT_RETRY_LIMIT; i++) { + for (i = 0; i < ICE_Q_WAIT_MAX_RETRY; i++) { u32 rx_reg = rd32(&pf->hw, QRX_CTRL(pf_q)); if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M)) break; - usleep_range(10, 20); + usleep_range(20, 40); } - if (i >= ICE_Q_WAIT_RETRY_LIMIT) + if (i >= ICE_Q_WAIT_MAX_RETRY) return -ETIMEDOUT; return 0; @@ -774,11 +774,13 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt) */ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, 
struct ice_vsi_ctx *ctxt) { - u16 offset = 0, qmap = 0, numq_tc; - u16 pow = 0, max_rss = 0, qcount; + u16 offset = 0, qmap = 0, tx_count = 0; u16 qcount_tx = vsi->alloc_txq; u16 qcount_rx = vsi->alloc_rxq; + u16 tx_numq_tc, rx_numq_tc; + u16 pow = 0, max_rss = 0; bool ena_tc0 = false; + u8 netdev_tc = 0; int i; /* at least TC0 should be enabled by default */ @@ -794,7 +796,12 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) vsi->tc_cfg.ena_tc |= 1; } - numq_tc = qcount_rx / vsi->tc_cfg.numtc; + rx_numq_tc = qcount_rx / vsi->tc_cfg.numtc; + if (!rx_numq_tc) + rx_numq_tc = 1; + tx_numq_tc = qcount_tx / vsi->tc_cfg.numtc; + if (!tx_numq_tc) + tx_numq_tc = 1; /* TC mapping is a function of the number of Rx queues assigned to the * VSI for each traffic class and the offset of these queues. @@ -808,7 +815,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) * Setup number and offset of Rx queues for all TCs for the VSI */ - qcount = numq_tc; + qcount_rx = rx_numq_tc; + /* qcount will change if RSS is enabled */ if (test_bit(ICE_FLAG_RSS_ENA, vsi->back->flags)) { if (vsi->type == ICE_VSI_PF || vsi->type == ICE_VSI_VF) { @@ -816,37 +824,41 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt) max_rss = ICE_MAX_LG_RSS_QS; else max_rss = ICE_MAX_SMALL_RSS_QS; - qcount = min_t(int, numq_tc, max_rss); - qcount = min_t(int, qcount, vsi->rss_size); + qcount_rx = min_t(int, rx_numq_tc, max_rss); + qcount_rx = min_t(int, qcount_rx, vsi->rss_size); } } /* find the (rounded up) power-of-2 of qcount */ - pow = order_base_2(qcount); + pow = order_base_2(qcount_rx); for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { if (!(vsi->tc_cfg.ena_tc & BIT(i))) { /* TC is not enabled */ vsi->tc_cfg.tc_info[i].qoffset = 0; - vsi->tc_cfg.tc_info[i].qcount = 1; + vsi->tc_cfg.tc_info[i].qcount_rx = 1; + vsi->tc_cfg.tc_info[i].qcount_tx = 1; + vsi->tc_cfg.tc_info[i].netdev_tc = 0; ctxt->info.tc_mapping[i] = 0; continue; } /* TC is enabled */ vsi->tc_cfg.tc_info[i].qoffset = offset; - vsi->tc_cfg.tc_info[i].qcount = qcount; + vsi->tc_cfg.tc_info[i].qcount_rx = qcount_rx; + vsi->tc_cfg.tc_info[i].qcount_tx = tx_numq_tc; + vsi->tc_cfg.tc_info[i].netdev_tc = netdev_tc++; qmap = ((offset << ICE_AQ_VSI_TC_Q_OFFSET_S) & ICE_AQ_VSI_TC_Q_OFFSET_M) | ((pow << ICE_AQ_VSI_TC_Q_NUM_S) & ICE_AQ_VSI_TC_Q_NUM_M); - offset += qcount; + offset += qcount_rx; + tx_count += tx_numq_tc; ctxt->info.tc_mapping[i] = cpu_to_le16(qmap); } - - vsi->num_txq = qcount_tx; vsi->num_rxq = offset; + vsi->num_txq = tx_count; if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) { dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n"); @@ -1000,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi) * @vsi: the VSI being configured * @v_idx: index of the vector in the VSI struct * - * We allocate one q_vector. If allocation fails we return -ENOMEM. + * We allocate one q_vector. If allocation fails we return -ENOMEM. */ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx) { @@ -1039,7 +1051,7 @@ out: * ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors * @vsi: the VSI being configured * - * We allocate one q_vector per queue interrupt. If allocation fails we + * We allocate one q_vector per queue interrupt. If allocation fails we * return -ENOMEM. 
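In the ice_vsi_setup_q_map() rewrite above, each TC's Rx queues are described to hardware as an offset plus a power-of-two count: order_base_2() rounds up, so for example 10 queues in a TC are advertised as 2^4 = 16 slots starting at the TC's offset. A worked sketch of the packing; the shift and mask values are hypothetical placeholders for the ICE_AQ_VSI_TC_Q_* constants:

#include <linux/log2.h>
#include <linux/types.h>

#define MY_TC_Q_OFFSET_S	0
#define MY_TC_Q_OFFSET_M	(0x7FF << MY_TC_Q_OFFSET_S)	/* hypothetical */
#define MY_TC_Q_NUM_S		11
#define MY_TC_Q_NUM_M		(0xF << MY_TC_Q_NUM_S)		/* hypothetical */

static u16 my_build_qmap(u16 offset, u16 qcount)
{
	u16 pow = order_base_2(qcount);	/* qcount 10 -> pow 4 (16 slots) */

	return ((offset << MY_TC_Q_OFFSET_S) & MY_TC_Q_OFFSET_M) |
	       ((pow << MY_TC_Q_NUM_S) & MY_TC_Q_NUM_M);
}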
*/ static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi) @@ -1188,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) struct ice_pf *pf = vsi->back; int i; - /* Allocate tx_rings */ + /* Allocate Tx rings */ for (i = 0; i < vsi->alloc_txq; i++) { struct ice_ring *ring; @@ -1207,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi) vsi->tx_rings[i] = ring; } - /* Allocate rx_rings */ + /* Allocate Rx rings */ for (i = 0; i < vsi->alloc_rxq; i++) { struct ice_ring *ring; @@ -1611,55 +1623,62 @@ int ice_vsi_cfg_txqs(struct ice_vsi *vsi) struct ice_aqc_add_tx_qgrp *qg_buf; struct ice_aqc_add_txqs_perq *txq; struct ice_pf *pf = vsi->back; + u8 num_q_grps, q_idx = 0; enum ice_status status; u16 buf_len, i, pf_q; int err = 0, tc = 0; - u8 num_q_grps; buf_len = sizeof(struct ice_aqc_add_tx_qgrp); qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL); if (!qg_buf) return -ENOMEM; - if (vsi->num_txq > ICE_MAX_TXQ_PER_TXQG) { - err = -EINVAL; - goto err_cfg_txqs; - } qg_buf->num_txqs = 1; num_q_grps = 1; - /* set up and configure the Tx queues */ - ice_for_each_txq(vsi, i) { - struct ice_tlan_ctx tlan_ctx = { 0 }; + /* set up and configure the Tx queues for each enabled TC */ + for (tc = 0; tc < ICE_MAX_TRAFFIC_CLASS; tc++) { + if (!(vsi->tc_cfg.ena_tc & BIT(tc))) + break; - pf_q = vsi->txq_map[i]; - ice_setup_tx_ctx(vsi->tx_rings[i], &tlan_ctx, pf_q); - /* copy context contents into the qg_buf */ - qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); - ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, - ice_tlan_ctx_info); + for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) { + struct ice_tlan_ctx tlan_ctx = { 0 }; + + pf_q = vsi->txq_map[q_idx]; + ice_setup_tx_ctx(vsi->tx_rings[q_idx], &tlan_ctx, + pf_q); + /* copy context contents into the qg_buf */ + qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q); + ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx, + ice_tlan_ctx_info); + + /* init queue specific tail reg. It is referred as + * transmit comm scheduler queue doorbell. + */ + vsi->tx_rings[q_idx]->tail = + pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); + status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, + num_q_grps, qg_buf, buf_len, + NULL); + if (status) { + dev_err(&vsi->back->pdev->dev, + "Failed to set LAN Tx queue context, error: %d\n", + status); + err = -ENODEV; + goto err_cfg_txqs; + } - /* init queue specific tail reg. It is referred as transmit - * comm scheduler queue doorbell. - */ - vsi->tx_rings[i]->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q); - status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, - num_q_grps, qg_buf, buf_len, NULL); - if (status) { - dev_err(&vsi->back->pdev->dev, - "Failed to set LAN Tx queue context, error: %d\n", - status); - err = -ENODEV; - goto err_cfg_txqs; - } + /* Add Tx Queue TEID into the VSI Tx ring from the + * response. This will complete configuring and + * enabling the queue. + */ + txq = &qg_buf->txqs[0]; + if (pf_q == le16_to_cpu(txq->txq_id)) + vsi->tx_rings[q_idx]->txq_teid = + le32_to_cpu(txq->q_teid); - /* Add Tx Queue TEID into the VSI Tx ring from the response - * This will complete configuring and enabling the queue. 
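One subtlety in the ice_vsi_cfg_txqs() loop above: the first disabled TC ends the walk with break rather than continue. That is only safe because ena_tc is built with the enabled TCs contiguous from TC 0 (the setup_q_map hunk earlier forces TC0 on). Reduced to a skeleton with a hypothetical per-queue hook:

#include <linux/bits.h>
#include <linux/types.h>

#define MY_MAX_TC 8

extern void my_setup_one_txq(u16 q_idx, int tc);	/* hypothetical */

static void my_cfg_txqs(u8 ena_tc, const u16 *qcount_tx)
{
	u16 q_idx = 0;
	int tc, i;

	for (tc = 0; tc < MY_MAX_TC; tc++) {
		/* break, not continue: enabled TCs are assumed to be
		 * contiguous starting at TC 0
		 */
		if (!(ena_tc & BIT(tc)))
			break;

		for (i = 0; i < qcount_tx[tc]; i++)
			my_setup_one_txq(q_idx++, tc);
	}
}

If sparse TC masks ever became legal, the break would silently skip every TC after the first hole.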
- */ - txq = &qg_buf->txqs[0]; - if (pf_q == le16_to_cpu(txq->txq_id)) - vsi->tx_rings[i]->txq_teid = - le32_to_cpu(txq->q_teid); + q_idx++; + } } err_cfg_txqs: devm_kfree(&pf->pdev->dev, qg_buf); @@ -1908,7 +1927,8 @@ int ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ice_for_each_txq(vsi, i) { u16 v_idx; - if (!vsi->tx_rings || !vsi->tx_rings[i]) { + if (!vsi->tx_rings || !vsi->tx_rings[i] || + !vsi->tx_rings[i]->q_vector) { err = -EINVAL; goto err_out; } @@ -2056,6 +2076,9 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, /* set RSS capabilities */ ice_vsi_set_rss_params(vsi); + /* set tc configuration */ + ice_vsi_set_tc_cfg(vsi); + /* create the VSI */ ret = ice_vsi_init(vsi); if (ret) @@ -2113,17 +2136,13 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, pf->q_left_rx -= vsi->alloc_rxq; break; default: - /* if VSI type is not recognized, clean up the resources and - * exit - */ + /* clean up the resources and exit */ goto unroll_vsi_init; } - ice_vsi_set_tc_cfg(vsi); - /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->num_txq; + max_txqs[i] = pf->num_lan_tx; ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); @@ -2314,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id) int start = res->search_hint; int end = start; - if ((start + needed) > res->num_entries) + if ((start + needed) > res->num_entries) return -ENOMEM; id |= ICE_RES_VALID_BIT; @@ -2491,6 +2510,7 @@ int ice_vsi_release(struct ice_vsi *vsi) } ice_remove_vsi_fltr(&pf->hw, vsi->idx); + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_delete(vsi); ice_vsi_free_q_vectors(vsi); ice_vsi_clear_rings(vsi); @@ -2518,11 +2538,14 @@ int ice_vsi_release(struct ice_vsi *vsi) int ice_vsi_rebuild(struct ice_vsi *vsi) { u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; + struct ice_pf *pf; int ret, i; if (!vsi) return -EINVAL; + pf = vsi->back; + ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_vsi_free_q_vectors(vsi); ice_free_res(vsi->back->sw_irq_tracker, vsi->sw_base_vector, vsi->idx); ice_free_res(vsi->back->hw_irq_tracker, vsi->hw_base_vector, vsi->idx); @@ -2532,6 +2555,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ice_vsi_free_arrays(vsi, false); ice_dev_onetime_setup(&vsi->back->hw); ice_vsi_set_num_qs(vsi); + ice_vsi_set_tc_cfg(vsi); /* Initialize VSI struct elements and create VSI in FW */ ret = ice_vsi_init(vsi); @@ -2578,11 +2602,9 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) break; } - ice_vsi_set_tc_cfg(vsi); - /* configure VSI nodes based on number of queues and TC's */ for (i = 0; i < vsi->tc_cfg.numtc; i++) - max_txqs[i] = vsi->num_txq; + max_txqs[i] = pf->num_lan_tx; ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, max_txqs); diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 333312a1d595..8725569d11f0 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -349,6 +349,9 @@ ice_prepare_for_reset(struct ice_pf *pf) /* disable the VSIs and their queues that are not already DOWN */ ice_pf_dis_all_vsi(pf); + if (hw->port_info) + ice_sched_clear_port(hw->port_info); + ice_shutdown_all_ctrlq(hw); set_bit(__ICE_PREPARED_FOR_RESET, pf->state); @@ -405,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf) /* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an * OICR interrupt. 
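
ice_search_res(), touched above, is a plain first-fit scan for a contiguous block of tracker entries starting at a hint. A self-contained approximation; RES_VALID_BIT and the array size are illustrative:

#include <errno.h>

#define RES_VALID_BIT 0x8000	/* illustrative */

struct res_tracker {
	int num_entries;
	int search_hint;
	int list[64];	/* 0 = free, otherwise owner id | RES_VALID_BIT */
};

/* first-fit scan for 'needed' contiguous free entries starting at the
 * hint; returns the base index or -ENOMEM, mirroring the shape of
 * ice_search_res()
 */
int search_res(struct res_tracker *res, int needed, int id)
{
	int start = res->search_hint;
	int end = start;
	int i;

	if (start + needed > res->num_entries)
		return -ENOMEM;

	do {
		end++;
		if (res->list[end - 1]) {	/* in use: restart past it */
			start = end;
			if (start + needed > res->num_entries)
				return -ENOMEM;
		}
	} while (end < start + needed);

	for (i = start; i < end; i++)
		res->list[i] = id | RES_VALID_BIT;
	res->search_hint = end;
	return start;
}
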
The OICR handler (ice_misc_intr) determines what type * of reset is pending and sets bits in pf->state indicating the reset - * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set + * type and __ICE_RESET_OICR_RECV. So, if the latter bit is set * prepare for pending reset if not already (for PF software-initiated * global resets the software should already be prepared for it as * indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated @@ -1379,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf) * @pf: board private structure * * This sets up the handler for MSIX 0, which is used to manage the - * non-queue interrupts, e.g. AdminQ and errors. This is not used + * non-queue interrupts, e.g. AdminQ and errors. This is not used * when in MSI or Legacy interrupt mode. */ static int ice_req_irq_msix_misc(struct ice_pf *pf) @@ -1783,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf) pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus()); - /* only 1 rx queue unless RSS is enabled */ + /* only 1 Rx queue unless RSS is enabled */ if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) pf->num_lan_rx = 1; else @@ -2091,8 +2094,7 @@ static int ice_probe(struct pci_dev *pdev, ice_determine_q_usage(pf); - pf->num_alloc_vsi = min_t(u16, ICE_MAX_VSI_ALLOC, - hw->func_caps.guaranteed_num_vsi); + pf->num_alloc_vsi = hw->func_caps.guar_num_vsi; if (!pf->num_alloc_vsi) { err = -EIO; goto err_init_pf_unroll; @@ -2544,7 +2546,6 @@ static int ice_vsi_cfg(struct ice_vsi *vsi) if (err) return err; } - err = ice_vsi_cfg_txqs(vsi); if (!err) err = ice_vsi_cfg_rxqs(vsi); @@ -2563,8 +2564,12 @@ static void ice_napi_enable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_enable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_enable(&q_vector->napi); + } } /** @@ -2931,8 +2936,12 @@ static void ice_napi_disable_all(struct ice_vsi *vsi) if (!vsi->netdev) return; - for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) - napi_disable(&vsi->q_vectors[q_idx]->napi); + for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) { + struct ice_q_vector *q_vector = vsi->q_vectors[q_idx]; + + if (q_vector->rx.ring || q_vector->tx.ring) + napi_disable(&q_vector->napi); + } } /** @@ -3138,8 +3147,9 @@ static void ice_vsi_release_all(struct ice_pf *pf) /** * ice_dis_vsi - pause a VSI * @vsi: the VSI being paused + * @locked: is the rtnl_lock already held */ -static void ice_dis_vsi(struct ice_vsi *vsi) +static void ice_dis_vsi(struct ice_vsi *vsi, bool locked) { if (test_bit(__ICE_DOWN, vsi->state)) return; @@ -3148,9 +3158,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi) if (vsi->type == ICE_VSI_PF && vsi->netdev) { if (netif_running(vsi->netdev)) { - rtnl_lock(); - vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); - rtnl_unlock(); + if (!locked) { + rtnl_lock(); + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + rtnl_unlock(); + } else { + vsi->netdev->netdev_ops->ndo_stop(vsi->netdev); + } } else { ice_vsi_close(vsi); } @@ -3189,7 +3203,7 @@ static void ice_pf_dis_all_vsi(struct ice_pf *pf) ice_for_each_vsi(pf, v) if (pf->vsi[v]) - ice_dis_vsi(pf->vsi[v]); + ice_dis_vsi(pf->vsi[v], false); } /** @@ -3618,6 +3632,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) * @dev: the netdev being configured * @nlh: RTNL message * @flags: bridge setlink flags + * 
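
The new locked parameter to ice_dis_vsi() is the usual pattern for a helper that is sometimes called with the lock already held by its caller. A generic pthread sketch of the same idea:

#include <pthread.h>

static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_stop(void)
{
	/* work that must run with cfg_lock held */
}

/* callers that already hold the lock pass locked = 1, so the helper
 * neither deadlocks nor unlocks a lock it does not own; same idea as
 * ice_dis_vsi(vsi, locked) guarding rtnl_lock()
 */
void dis_path(int locked)
{
	if (!locked) {
		pthread_mutex_lock(&cfg_lock);
		do_stop();
		pthread_mutex_unlock(&cfg_lock);
	} else {
		do_stop();
	}
}
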
@extack: netlink extended ack * * Sets the bridge mode (VEB/VEPA) of the switch to which the netdev (VSI) is * hooked up to. Iterates through the PF VSI list and sets the loopback mode (if @@ -3626,7 +3641,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode) */ static int ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, - u16 __always_unused flags) + u16 __always_unused flags, struct netlink_ext_ack *extack) { struct ice_netdev_priv *np = netdev_priv(dev); struct ice_pf *pf = np->vsi->back; @@ -3668,7 +3683,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh, */ status = ice_update_sw_rule_bridge_mode(hw); if (status) { - netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n", + netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n", mode, status, hw->adminq.sq_last_status); /* revert hw->evb_veb */ hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB); @@ -3691,40 +3706,36 @@ static void ice_tx_timeout(struct net_device *netdev) struct ice_ring *tx_ring = NULL; struct ice_vsi *vsi = np->vsi; struct ice_pf *pf = vsi->back; - u32 head, val = 0, i; int hung_queue = -1; + u32 i; pf->tx_timeout_count++; - /* find the stopped queue the same way the stack does */ + /* find the stopped queue the same way dev_watchdog() does */ for (i = 0; i < netdev->num_tx_queues; i++) { - struct netdev_queue *q; unsigned long trans_start; + struct netdev_queue *q; q = netdev_get_tx_queue(netdev, i); trans_start = q->trans_start; if (netif_xmit_stopped(q) && time_after(jiffies, - (trans_start + netdev->watchdog_timeo))) { + trans_start + netdev->watchdog_timeo)) { hung_queue = i; break; } } - if (i == netdev->num_tx_queues) { + if (i == netdev->num_tx_queues) netdev_info(netdev, "tx_timeout: no netdev hung queue found\n"); - } else { + else /* now that we have an index, find the tx_ring struct */ - for (i = 0; i < vsi->num_txq; i++) { - if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) { - if (hung_queue == - vsi->tx_rings[i]->q_index) { + for (i = 0; i < vsi->num_txq; i++) + if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) + if (hung_queue == vsi->tx_rings[i]->q_index) { tx_ring = vsi->tx_rings[i]; break; } - } - } - } /* Reset recovery level if enough time has elapsed after last timeout. * Also ensure no new reset action happens before next timeout period. @@ -3736,17 +3747,20 @@ static void ice_tx_timeout(struct net_device *netdev) return; if (tx_ring) { - head = tx_ring->next_to_clean; + struct ice_hw *hw = &pf->hw; + u32 head, val = 0; + + head = (rd32(hw, QTX_COMM_HEAD(vsi->txq_map[hung_queue])) & + QTX_COMM_HEAD_HEAD_M) >> QTX_COMM_HEAD_HEAD_S; /* Read interrupt register */ if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - val = rd32(&pf->hw, + val = rd32(hw, GLINT_DYN_CTL(tx_ring->q_vector->v_idx + - tx_ring->vsi->hw_base_vector)); + tx_ring->vsi->hw_base_vector)); - netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n", + netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n", vsi->vsi_num, hung_queue, tx_ring->next_to_clean, - head, tx_ring->next_to_use, - readl(tx_ring->tail), val); + head, tx_ring->next_to_use, val); } pf->tx_timeout_last_recovery = jiffies; @@ -3780,7 +3794,7 @@ static void ice_tx_timeout(struct net_device *netdev) * @netdev: network interface device structure * * The open entry point is called when a network interface is made - * active by the system (IFF_UP). 
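
The reworked timeout handler locates the hung queue the way dev_watchdog() does: a queue is hung if it is stopped and its last transmission is older than the watchdog period. A sketch using the kernel's wrap-safe time comparison trick:

#include <stdbool.h>

typedef unsigned long jiffies_t;

/* wrap-safe "a is after b", the same trick as the kernel's time_after() */
static bool time_after(jiffies_t a, jiffies_t b)
{
	return (long)(b - a) < 0;
}

struct txq_state {
	bool stopped;
	jiffies_t trans_start;
};

int find_hung_queue(const struct txq_state *q, int nq,
		    jiffies_t now, jiffies_t watchdog_timeo)
{
	int i;

	for (i = 0; i < nq; i++)
		if (q[i].stopped &&
		    time_after(now, q[i].trans_start + watchdog_timeo))
			return i;
	return -1;	/* no hung queue found */
}
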
At this point all resources needed + * active by the system (IFF_UP). At this point all resources needed * for transmit and receive operations are allocated, the interrupt * handler is registered with the OS, the netdev watchdog is enabled, * and the stack is notified that the interface is ready. @@ -3813,7 +3827,7 @@ static int ice_open(struct net_device *netdev) * @netdev: network interface device structure * * The stop entry point is called when an interface is de-activated by the OS, - * and the netdevice enters the DOWN state. The hardware is still under the + * and the netdevice enters the DOWN state. The hardware is still under the * driver's control, but the netdev interface is disabled. * * Returns success only - not allowed to fail @@ -3842,14 +3856,14 @@ ice_features_check(struct sk_buff *skb, size_t len; /* No point in doing any of this if neither checksum nor GSO are - * being requested for this frame. We can rule out both by just + * being requested for this frame. We can rule out both by just * checking for CHECKSUM_PARTIAL */ if (skb->ip_summed != CHECKSUM_PARTIAL) return features; /* We cannot support GSO if the MSS is going to be less than - * 64 bytes. If it is then we need to drop support for GSO. + * 64 bytes. If it is then we need to drop support for GSO. */ if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64)) features &= ~NETIF_F_GSO_MASK; diff --git a/drivers/net/ethernet/intel/ice/ice_sched.c b/drivers/net/ethernet/intel/ice/ice_sched.c index 7cc8aa18a22b..a1681853df2e 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.c +++ b/drivers/net/ethernet/intel/ice/ice_sched.c @@ -630,7 +630,7 @@ static void ice_sched_clear_tx_topo(struct ice_port_info *pi) * * Cleanup scheduling elements from SW DB */ -static void ice_sched_clear_port(struct ice_port_info *pi) +void ice_sched_clear_port(struct ice_port_info *pi) { if (!pi || pi->port_state != ICE_SCHED_PORT_STATE_READY) return; @@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw) * This function removes the leaf node that was created by the FW * during initialization */ -static void -ice_rm_dflt_leaf_node(struct ice_port_info *pi) +static void ice_rm_dflt_leaf_node(struct ice_port_info *pi) { struct ice_sched_node *node; @@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi) * This function frees all the nodes except root and TC that were created by * the FW during initialization */ -static void -ice_sched_rm_dflt_nodes(struct ice_port_info *pi) +static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi) { struct ice_sched_node *node; @@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi, * @num_nodes: pointer to num nodes array * * This function calculates the number of supported nodes needed to add this - * VSI into tx tree including the VSI, parent and intermediate nodes in below + * VSI into Tx tree including the VSI, parent and intermediate nodes in below * layers */ static void @@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw, } /** - * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree + * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree * @pi: port information structure * @vsi_handle: software VSI handle * @tc_node: pointer to TC node * @num_nodes: pointer to num nodes array * - * This function adds the VSI supported nodes into tx tree including the + * This function adds the VSI supported nodes into Tx tree including the * VSI, its parent and intermediate nodes in below layers */ 
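
The GSO clamp in ice_features_check() is a feature-bit mask operation; a tiny sketch, with an illustrative value for the NETIF_F_GSO_MASK bit range:

#include <stdbool.h>
#include <stdint.h>

#define NETIF_F_GSO_MASK 0x0000ffffULL	/* illustrative bit range */

/* if the MSS is below the 64-byte floor, strip every GSO feature bit
 * so the stack falls back to software segmentation for this frame
 */
uint64_t features_check(uint64_t features, bool is_gso,
			unsigned int gso_size)
{
	if (is_gso && gso_size < 64)
		features &= ~(uint64_t)NETIF_F_GSO_MASK;
	return features;
}
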
static enum ice_status @@ -1527,7 +1525,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_handle, } /** - * ice_sched_cfg_vsi - configure the new/exisiting VSI + * ice_sched_cfg_vsi - configure the new/existing VSI * @pi: port information structure * @vsi_handle: software VSI handle * @tc: TC number @@ -1605,3 +1603,109 @@ ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, return status; } + +/** + * ice_sched_rm_agg_vsi_entry - remove agg related VSI info entry + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function removes single aggregator VSI info entry from + * aggregator list. + */ +static void +ice_sched_rm_agg_vsi_info(struct ice_port_info *pi, u16 vsi_handle) +{ + struct ice_sched_agg_info *agg_info; + struct ice_sched_agg_info *atmp; + + list_for_each_entry_safe(agg_info, atmp, &pi->agg_list, list_entry) { + struct ice_sched_agg_vsi_info *agg_vsi_info; + struct ice_sched_agg_vsi_info *vtmp; + + list_for_each_entry_safe(agg_vsi_info, vtmp, + &agg_info->agg_vsi_list, list_entry) + if (agg_vsi_info->vsi_handle == vsi_handle) { + list_del(&agg_vsi_info->list_entry); + devm_kfree(ice_hw_to_dev(pi->hw), + agg_vsi_info); + return; + } + } +} + +/** + * ice_sched_rm_vsi_cfg - remove the VSI and its children nodes + * @pi: port information structure + * @vsi_handle: software VSI handle + * @owner: LAN or RDMA + * + * This function removes the VSI and its LAN or RDMA children nodes from the + * scheduler tree. + */ +static enum ice_status +ice_sched_rm_vsi_cfg(struct ice_port_info *pi, u16 vsi_handle, u8 owner) +{ + enum ice_status status = ICE_ERR_PARAM; + struct ice_vsi_ctx *vsi_ctx; + u8 i, j = 0; + + if (!ice_is_vsi_valid(pi->hw, vsi_handle)) + return status; + mutex_lock(&pi->sched_lock); + vsi_ctx = ice_get_vsi_ctx(pi->hw, vsi_handle); + if (!vsi_ctx) + goto exit_sched_rm_vsi_cfg; + + for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) { + struct ice_sched_node *vsi_node, *tc_node; + + tc_node = ice_sched_get_tc_node(pi, i); + if (!tc_node) + continue; + + vsi_node = ice_sched_get_vsi_node(pi->hw, tc_node, vsi_handle); + if (!vsi_node) + continue; + + while (j < vsi_node->num_children) { + if (vsi_node->children[j]->owner == owner) { + ice_free_sched_node(pi, vsi_node->children[j]); + + /* reset the counter again since the num + * children will be updated after node removal + */ + j = 0; + } else { + j++; + } + } + /* remove the VSI if it has no children */ + if (!vsi_node->num_children) { + ice_free_sched_node(pi, vsi_node); + vsi_ctx->sched.vsi_node[i] = NULL; + + /* clean up agg related vsi info if any */ + ice_sched_rm_agg_vsi_info(pi, vsi_handle); + } + if (owner == ICE_SCHED_NODE_OWNER_LAN) + vsi_ctx->sched.max_lanq[i] = 0; + } + status = 0; + +exit_sched_rm_vsi_cfg: + mutex_unlock(&pi->sched_lock); + return status; +} + +/** + * ice_rm_vsi_lan_cfg - remove VSI and its LAN children nodes + * @pi: port information structure + * @vsi_handle: software VSI handle + * + * This function clears the VSI and its LAN children nodes from scheduler tree + * for all TCs. 
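
Worth noting in ice_sched_rm_vsi_cfg() below: freeing a child compacts the children array, so the scan index resets to zero after every removal rather than stepping past a freshly shifted entry. A self-contained model of that loop:

struct node {
	int owner;
	unsigned int num_children;
	struct node *children[8];
};

/* remove children[j] and compact the array; a stand-in for
 * ice_free_sched_node(), which compacts in the real driver
 */
static void free_child(struct node *vsi, unsigned int j)
{
	unsigned int k;

	for (k = j; k + 1 < vsi->num_children; k++)
		vsi->children[k] = vsi->children[k + 1];
	vsi->num_children--;
}

void rm_children_by_owner(struct node *vsi, int owner)
{
	unsigned int j = 0;

	/* restart the scan after each removal: the array shifted under us */
	while (j < vsi->num_children) {
		if (vsi->children[j]->owner == owner) {
			free_child(vsi, j);
			j = 0;
		} else {
			j++;
		}
	}
}
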
+ */ +enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle) +{ + return ice_sched_rm_vsi_cfg(pi, vsi_handle, ICE_SCHED_NODE_OWNER_LAN); +} diff --git a/drivers/net/ethernet/intel/ice/ice_sched.h b/drivers/net/ethernet/intel/ice/ice_sched.h index 5dc9cfa04c58..da5b4c166da8 100644 --- a/drivers/net/ethernet/intel/ice/ice_sched.h +++ b/drivers/net/ethernet/intel/ice/ice_sched.h @@ -12,6 +12,7 @@ struct ice_sched_agg_vsi_info { struct list_head list_entry; DECLARE_BITMAP(tc_bitmap, ICE_MAX_TRAFFIC_CLASS); + u16 vsi_handle; }; struct ice_sched_agg_info { @@ -25,6 +26,7 @@ struct ice_sched_agg_info { /* FW AQ command calls */ enum ice_status ice_sched_init_port(struct ice_port_info *pi); enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw); +void ice_sched_clear_port(struct ice_port_info *pi); void ice_sched_cleanup_all(struct ice_hw *hw); struct ice_sched_node * ice_sched_find_node_by_teid(struct ice_sched_node *start_node, u32 teid); @@ -39,4 +41,5 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc, enum ice_status ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs, u8 owner, bool enable); +enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle); #endif /* _ICE_SCHED_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_sriov.c b/drivers/net/ethernet/intel/ice/ice_sriov.c index 027eba4e13f8..533b989a23e1 100644 --- a/drivers/net/ethernet/intel/ice/ice_sriov.c +++ b/drivers/net/ethernet/intel/ice/ice_sriov.c @@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval, * @link_speed: variable containing the link_speed to be converted * * Convert link speed supported by HW to link speed supported by virtchnl. - * If adv_link_support is true, then return link speed in Mbps. Else return + * If adv_link_support is true, then return link speed in Mbps. Else return * link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller * needs to cast back to an enum virtchnl_link_speed in the case where * adv_link_support is false, but when adv_link_support is true the caller can diff --git a/drivers/net/ethernet/intel/ice/ice_switch.c b/drivers/net/ethernet/intel/ice/ice_switch.c index 40c9c6558956..2e5693107fa4 100644 --- a/drivers/net/ethernet/intel/ice/ice_switch.c +++ b/drivers/net/ethernet/intel/ice/ice_switch.c @@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries, * Allocate memory for the entire recipe table and initialize the structures/ * entries corresponding to basic recipes. */ -enum ice_status -ice_init_def_sw_recp(struct ice_hw *hw) +enum ice_status ice_init_def_sw_recp(struct ice_hw *hw) { struct ice_sw_recipe *recps; u8 i; @@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw) * * NOTE: *req_desc is both an input/output parameter. * The caller of this function first calls this function with *request_desc set - * to 0. If the response from f/w has *req_desc set to 0, all the switch + * to 0. 
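
The req_desc contract described in the ice_switch.c comment here is a simple resumable-read loop; a sketch with a hypothetical get_sw_cfg() standing in for the AQ call:

#include <stdint.h>

/* get_sw_cfg() is a hypothetical stand-in for the AQ call: it fills
 * buf and updates *req_desc, where 0 means no further data
 */
int fetch_all_sw_cfg(void *buf, uint16_t len,
		     int (*get_sw_cfg)(void *buf, uint16_t len,
				       uint16_t *req_desc))
{
	uint16_t req_desc = 0;	/* the first call always passes 0 */
	int err;

	do {
		err = get_sw_cfg(buf, len, &req_desc);
		if (err)
			return err;
		/* consume this chunk of switch configuration here */
	} while (req_desc);	/* non-zero: firmware has more to return */

	return 0;
}
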
If the response from f/w has *req_desc set to 0, all the switch * configuration information has been returned; if non-zero (meaning not all * the information was returned), the caller should call this function again * with *req_desc set to the previous value returned by f/w to get the @@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw) /** * ice_fill_sw_info - Helper function to populate lb_en and lan_en * @hw: pointer to the hardware structure - * @f_info: filter info structure to fill/update + * @fi: filter info structure to fill/update * * This helper function populates the lb_en and lan_en elements of the provided * ice_fltr_info struct using the switch's type and characteristics of the * switch rule being configured. */ -static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info) +static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi) { - f_info->lb_en = false; - f_info->lan_en = false; - if ((f_info->flag & ICE_FLTR_TX) && - (f_info->fltr_act == ICE_FWD_TO_VSI || - f_info->fltr_act == ICE_FWD_TO_VSI_LIST || - f_info->fltr_act == ICE_FWD_TO_Q || - f_info->fltr_act == ICE_FWD_TO_QGRP)) { - f_info->lb_en = true; - if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC && - is_unicast_ether_addr(f_info->l_data.mac.mac_addr))) - f_info->lan_en = true; + fi->lb_en = false; + fi->lan_en = false; + if ((fi->flag & ICE_FLTR_TX) && + (fi->fltr_act == ICE_FWD_TO_VSI || + fi->fltr_act == ICE_FWD_TO_VSI_LIST || + fi->fltr_act == ICE_FWD_TO_Q || + fi->fltr_act == ICE_FWD_TO_QGRP)) { + fi->lb_en = true; + /* Do not set lan_en to TRUE if + * 1. The switch is a VEB AND + * 2 + * 2.1 The lookup is MAC with unicast addr for MAC, OR + * 2.2 The lookup is MAC_VLAN with unicast addr for MAC + * + * In all other cases, the LAN enable has to be set to true. + */ + if (!(hw->evb_veb && + ((fi->lkup_type == ICE_SW_LKUP_MAC && + is_unicast_ether_addr(fi->l_data.mac.mac_addr)) || + (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN && + is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr))))) + fi->lan_en = true; } } @@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, /* Create two back-to-back switch rules and submit them to the HW using * one memory buffer: * 1. Large Action - * 2. Look up tx rx + * 2. Look up Tx Rx */ lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts); rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE; @@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent, lg_act->pdata.lg_act.act[2] = cpu_to_le32(act); - /* call the fill switch rule to fill the lookup tx rx structure */ + /* call the fill switch rule to fill the lookup Tx Rx structure */ ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx, ice_aqc_opc_update_sw_rules); @@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw) * Call AQ command to add or update previously created VSI list with new VSI. * * Helper function to do book keeping associated with adding filter information - * The algorithm to do the booking keeping is described below : - * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.) + * The algorithm to do the book keeping is described below : + * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.) 
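
The rewritten ice_fill_sw_info() reduces to a small truth table; a condensed sketch with the lookup conditions folded into booleans:

#include <stdbool.h>

/* condensed decision from the rewritten helper: on a Tx forwarding
 * rule, loopback is always on; LAN enable is suppressed only when a
 * VEB is forwarding on a unicast MAC (plain MAC or MAC+VLAN lookup),
 * since the VEB hairpins that traffic internally
 */
void fill_sw_info(bool tx_fwd_rule, bool evb_veb, bool unicast_mac_lookup,
		  bool *lb_en, bool *lan_en)
{
	*lb_en = false;
	*lan_en = false;
	if (tx_fwd_rule) {
		*lb_en = true;
		if (!(evb_veb && unicast_mac_lookup))
			*lan_en = true;
	}
}
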
* if only one VSI has been added till now * Allocate a new VSI list and add two VSIs * to this list using switch rule command @@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw, u16 vsi_handle = new_fltr->vsi_handle; enum ice_adminq_opc opcode; + if (!m_entry->vsi_list_info) + return ICE_ERR_CFG; + /* A rule already exists with the new VSI being added */ if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map)) return 0; @@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry) tmp_fltr.fwd_id.vsi_list_id = vsi_list_id; tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST; /* Update the previous switch rule to a new VSI list which - * includes current VSI thats requested + * includes current VSI that is requested */ status = ice_update_pkt_fwd_rule(hw, &tmp_fltr); if (status) diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c index fe5bbabbb41e..49fc38094185 100644 --- a/drivers/net/ethernet/intel/ice/ice_txrx.c +++ b/drivers/net/ethernet/intel/ice/ice_txrx.c @@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring, /** * ice_setup_tx_ring - Allocate the Tx descriptors - * @tx_ring: the tx ring to set up + * @tx_ring: the Tx ring to set up * * Return 0 on success, negative on error */ @@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring) /** * ice_setup_rx_ring - Allocate the Rx descriptors - * @rx_ring: the rx ring to set up + * @rx_ring: the Rx ring to set up * * Return 0 on success, negative on error */ @@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val) rx_ring->next_to_alloc = val; /* Force memory writes to complete before letting h/w - * know there are new descriptors to fetch. (Only + * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ @@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf, /** * ice_reuse_rx_page - page flip buffer and store it back on the ring - * @rx_ring: rx descriptor ring to store buffers on + * @rx_ring: Rx descriptor ring to store buffers on * @old_buf: donor buffer to have page reused * * Synchronizes page for reuse by the adapter @@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring, /** * ice_fetch_rx_buf - Allocate skb and populate it - * @rx_ring: rx descriptor ring to transact packets on + * @rx_ring: Rx descriptor ring to transact packets on * @rx_desc: descriptor containing info written by hardware * * This function allocates an skb on the fly, and populates it with the page @@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring, * ice_pull_tail - ice specific version of skb_pull_tail * @skb: pointer to current skb being adjusted * - * This function is an ice specific version of __pskb_pull_tail. The + * This function is an ice specific version of __pskb_pull_tail. The * main difference between this version and the original function is that * this function can make several assumptions about the state of things * that allow for significant optimizations versus the standard function. @@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc, * @rx_desc: Rx descriptor for current buffer * @skb: Current socket buffer containing buffer in progress * - * This function updates next to clean. If the buffer is an EOP buffer + * This function updates next to clean. 
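
A compressed model of the book keeping described above: the first subscriber gets a direct-to-VSI rule, the second converts it to a freshly allocated VSI list, and later VSIs just join that list. alloc_vsi_list(), vsi_in_list() and add_to_vsi_list() are hypothetical stand-ins for the AQ-backed operations:

struct fltr {
	int vsi_count;
	int first_vsi;
	int vsi_list_id;
};

extern int alloc_vsi_list(int vsi_a, int vsi_b);
extern int vsi_in_list(int list_id, int vsi);
extern void add_to_vsi_list(int list_id, int vsi);

int add_update_vsi_list(struct fltr *f, int new_vsi)
{
	if (f->vsi_count == 1) {
		/* second subscriber: convert the direct rule to a list */
		int list_id = alloc_vsi_list(f->first_vsi, new_vsi);

		if (list_id < 0)
			return list_id;
		f->vsi_list_id = list_id;
	} else {
		if (vsi_in_list(f->vsi_list_id, new_vsi))
			return 0;	/* already subscribed */
		add_to_vsi_list(f->vsi_list_id, new_vsi);
	}
	f->vsi_count++;
	return 0;
}
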
If the buffer is an EOP buffer * this function exits returning false, otherwise it will place the * sk_buff in the next buffer to be chained and return true indicating * that this is in fact a non-EOP buffer. @@ -904,7 +904,7 @@ checksum_fail: /** * ice_process_skb_fields - Populate skb header fields from Rx descriptor - * @rx_ring: rx descriptor ring packet is being transacted on + * @rx_ring: Rx descriptor ring packet is being transacted on * @rx_desc: pointer to the EOP Rx descriptor * @skb: pointer to current skb being populated * @ptype: the packet type decoded by hardware @@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring, /** * ice_receive_skb - Send a completed packet up the stack - * @rx_ring: rx ring in play + * @rx_ring: Rx ring in play * @skb: packet to send up * @vlan_tag: vlan tag for packet * @@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb, /** * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf - * @rx_ring: rx descriptor ring to transact packets on + * @rx_ring: Rx descriptor ring to transact packets on * @budget: Total limit on number of packets to process * * This function provides a "bounce buffer" approach to Rx interrupt - * processing. The advantage to this is that on systems that have + * processing. The advantage to this is that on systems that have * expensive overhead for IOMMU access this provides a means of avoiding * it by maintaining the mapping of the page to the system. * @@ -1103,11 +1103,14 @@ int ice_napi_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - /* Work is done so exit the polling mode and re-enable the interrupt */ - napi_complete_done(napi, work_done); - if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) - ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); - return 0; + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags)) + ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector); + + return min(work_done, budget - 1); } /* helper function for building cmd/type/offset */ @@ -1122,7 +1125,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag) } /** - * __ice_maybe_stop_tx - 2nd level check for tx stop conditions + * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * @@ -1145,7 +1148,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) } /** - * ice_maybe_stop_tx - 1st level check for tx stop conditions + * ice_maybe_stop_tx - 1st level check for Tx stop conditions * @tx_ring: the ring to be checked * @size: the size buffer we want to assure is available * @@ -1155,6 +1158,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size) { if (likely(ICE_DESC_UNUSED(tx_ring) >= size)) return 0; + return __ice_maybe_stop_tx(tx_ring, size); } @@ -1552,7 +1556,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off) * Finally, we add one to round up. Because 256 isn't an exact multiple of * 3, we'll underestimate near each multiple of 12K. This is actually more * accurate as we have 4K - 1 of wiggle room that we can fit into the last - * segment. For our purposes this is accurate out to 1M which is orders of + * segment. 
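
The new poll exit used across ice, igb, igbvf and igc in this series: only re-arm interrupts when napi_complete_done() reports that the NAPI really left polling (it returns false under busy-polling), and never return the full budget from the done path. A sketch with a stubbed re-arm helper:

#include <stdbool.h>

static void enable_irq_hw(void)
{
	/* hypothetical stand-in for the driver's interrupt re-arm */
}

/* napi_done models the return of napi_complete_done(): false means the
 * core kept us scheduled for busy-polling, so interrupts stay masked;
 * returning budget means "not done", and the done path stays strictly
 * below budget via min(work_done, budget - 1)
 */
int poll_exit(int work_done, int budget, bool clean_complete, bool napi_done)
{
	if (!clean_complete)
		return budget;
	if (napi_done)
		enable_irq_hw();
	return work_done < budget - 1 ? work_done : budget - 1;
}
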
For our purposes this is accurate out to 1M which is orders of * magnitude greater than our largest possible GSO size. * * This would then be implemented as: @@ -1568,7 +1572,7 @@ static unsigned int ice_txd_use_count(unsigned int size) } /** - * ice_xmit_desc_count - calculate number of tx descriptors needed + * ice_xmit_desc_count - calculate number of Tx descriptors needed * @skb: send buffer * * Returns number of data descriptors needed for this skb. @@ -1620,7 +1624,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb) nr_frags -= ICE_MAX_BUF_TXD - 2; frag = &skb_shinfo(skb)->frags[0]; - /* Initialize size to the negative value of gso_size minus 1. We + /* Initialize size to the negative value of gso_size minus 1. We * use this as the worst case scenerio in which the frag ahead * of us only provides one byte which is why we are limited to 6 * descriptors for a single transmit as the header and previous diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h index f4dbc81c1988..0ea428104215 100644 --- a/drivers/net/ethernet/intel/ice/ice_type.h +++ b/drivers/net/ethernet/intel/ice/ice_type.h @@ -124,6 +124,8 @@ struct ice_phy_info { /* Common HW capabilities for SW use */ struct ice_hw_common_caps { + u32 valid_functions; + /* TX/RX queues */ u16 num_rxq; /* Number/Total RX queues */ u16 rxq_first_id; /* First queue ID for RX queues */ @@ -150,7 +152,7 @@ struct ice_hw_func_caps { struct ice_hw_common_caps common_cap; u32 num_allocd_vfs; /* Number of allocated VFs */ u32 vf_base_id; /* Logical ID of the first VF */ - u32 guaranteed_num_vsi; + u32 guar_num_vsi; }; /* Device wide capabilities */ diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c index e71065f9d391..05ff4f910649 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c @@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf) clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states); } -/***********************enable_vf routines*****************************/ - /** * ice_dis_vf_mappings * @vf: pointer to the VF structure @@ -215,6 +213,15 @@ void ice_free_vfs(struct ice_pf *pf) while (test_and_set_bit(__ICE_VF_DIS, pf->state)) usleep_range(1000, 2000); + /* Disable IOV before freeing resources. This lets any VF drivers + * running in the host get themselves cleaned up before we yank + * the carpet out from underneath their feet. + */ + if (!pci_vfs_assigned(pf->pdev)) + pci_disable_sriov(pf->pdev); + else + dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); + /* Avoid wait time by stopping all VFs at the same time */ for (i = 0; i < pf->num_alloc_vfs; i++) { if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states)) @@ -228,15 +235,6 @@ void ice_free_vfs(struct ice_pf *pf) clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); } - /* Disable IOV before freeing resources. This lets any VF drivers - * running in the host get themselves cleaned up before we yank - * the carpet out from underneath their feet. - */ - if (!pci_vfs_assigned(pf->pdev)) - pci_disable_sriov(pf->pdev); - else - dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n"); - tmp = pf->num_alloc_vfs; pf->num_vf_qps = 0; pf->num_alloc_vfs = 0; @@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) /* Clear this bit after VF initialization since we shouldn't reclaim * and reassign interrupts for synchronous or asynchronous VFR events. 
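
The 85/2^20 approximation in the ice_txd_use_count() comment above can be brute-force checked: the estimate may land one below DIV_ROUND_UP(size, 12K) just past each 12K multiple, but with up to 16K - 1 bytes per descriptor it is always sufficient. A runnable check, assuming those two constants:

#include <stdio.h>

static unsigned int txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}

int main(void)
{
	const unsigned int max_per_txd = (16 * 1024) - 1;
	unsigned int size;

	for (size = 1; size <= 1u << 20; size++) {
		unsigned int need = (size + max_per_txd - 1) / max_per_txd;

		if (txd_use_count(size) < need) {
			printf("short at size=%u\n", size);
			return 1;
		}
	}
	printf("estimate is always sufficient up to 1MB\n");
	return 0;
}
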
- * We don't want to reconfigure interrupts since AVF driver doesn't + * We dont want to reconfigure interrupts since AVF driver doesn't * expect vector assignment to be changed unless there is a request for * more vectors. */ @@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs) * ice_process_vflr_event - Free VF resources via IRQ calls * @pf: pointer to the PF structure * - * called from the VLFR IRQ handler to + * called from the VFLR IRQ handler to * free up VF resources and state variables */ void ice_process_vflr_event(struct ice_pf *pf) @@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg) /* copy Tx queue info from VF into VSI */ vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr; vsi->tx_rings[i]->count = qpi->txq.ring_len; - /* copy Rx queue info from VF into vsi */ + /* copy Rx queue info from VF into VSI */ vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr; vsi->rx_rings[i]->count = qpi->rxq.ring_len; if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) { @@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf) * @msg: pointer to the msg buffer * @set: true if mac filters are being set, false otherwise * - * add guest mac address filter + * add guest MAC address filter */ static int ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set) @@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg) * @msg: pointer to the msg buffer * * VFs get a default number of queues but can use this message to request a - * different number. If the request is successful, PF will reset the VF and + * different number. If the request is successful, PF will reset the VF and * return 0. If unsuccessful, PF will send message informing VF of number of - * available queue pairs via virtchnl message response to VF. + * available queue pairs via virtchnl message response to vf. */ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) { @@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg) tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx); if (req_queues <= 0) { dev_err(&pf->pdev->dev, - "VF %d tried to request %d queues. Ignoring.\n", + "VF %d tried to request %d queues. 
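
A validation sketch for the VF queue-request message handled nearby; all limits are plain parameters here rather than the driver's real PF fields (q_left_tx, q_left_rx, ICE_MAX_QS_PER_VF):

int handle_req_queues(int req, int q_left_tx, int q_left_rx,
		      int max_qs_per_vf, int *granted)
{
	int avail = q_left_tx < q_left_rx ? q_left_tx : q_left_rx;

	if (req <= 0 || req > max_qs_per_vf)
		return -1;	/* ignore, report the cap back to the VF */
	if (req > avail)
		return -1;	/* report how many are actually available */
	*granted = req;	/* success: PF resets the VF with the new count */
	return 0;
}
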
Ignoring.\n", vf->vf_id, req_queues); } else if (req_queues > ICE_MAX_QS_PER_VF) { dev_err(&pf->pdev->dev, diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h index 10131e0180f9..01470a8ee03a 100644 --- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h @@ -70,7 +70,7 @@ struct ice_vf { u8 spoofchk; u16 num_mac; u16 num_vlan; - u8 num_req_qs; /* num of queue pairs requested by VF */ + u8 num_req_qs; /* num of queue pairs requested by VF */ }; #ifdef CONFIG_PCI_IOV diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h index 8a28f3388f69..01fcfc6f3415 100644 --- a/drivers/net/ethernet/intel/igb/e1000_defines.h +++ b/drivers/net/ethernet/intel/igb/e1000_defines.h @@ -334,6 +334,7 @@ #define I210_RXPBSIZE_DEFAULT 0x000000A2 /* RXPBSIZE default */ #define I210_RXPBSIZE_MASK 0x0000003F +#define I210_RXPBSIZE_PB_30KB 0x0000001E #define I210_RXPBSIZE_PB_32KB 0x00000020 #define I210_TXPBSIZE_DEFAULT 0x04000014 /* TXPBSIZE default */ #define I210_TXPBSIZE_MASK 0xC0FFFFFF diff --git a/drivers/net/ethernet/intel/igb/igb.h b/drivers/net/ethernet/intel/igb/igb.h index ca54e268d157..fe1592ae8769 100644 --- a/drivers/net/ethernet/intel/igb/igb.h +++ b/drivers/net/ethernet/intel/igb/igb.h @@ -515,7 +515,7 @@ struct igb_adapter { /* OS defined structs */ struct pci_dev *pdev; - spinlock_t stats64_lock; + struct mutex stats64_lock; struct rtnl_link_stats64 stats64; /* structs defined in e1000_hw.h */ diff --git a/drivers/net/ethernet/intel/igb/igb_ethtool.c b/drivers/net/ethernet/intel/igb/igb_ethtool.c index 5acf3b743876..7426060b678f 100644 --- a/drivers/net/ethernet/intel/igb/igb_ethtool.c +++ b/drivers/net/ethernet/intel/igb/igb_ethtool.c @@ -2113,7 +2113,7 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct igb_adapter *adapter = netdev_priv(netdev); - if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE)) + if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_FILTER)) return -EOPNOTSUPP; if (!(adapter->flags & IGB_FLAG_WOL_SUPPORTED)) @@ -2295,7 +2295,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, int i, j; char *p; - spin_lock(&adapter->stats64_lock); + mutex_lock(&adapter->stats64_lock); igb_update_stats(adapter); for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { @@ -2338,7 +2338,7 @@ static void igb_get_ethtool_stats(struct net_device *netdev, } while (u64_stats_fetch_retry_irq(&ring->rx_syncp, start)); i += IGB_RX_QUEUE_STATS_LEN; } - spin_unlock(&adapter->stats64_lock); + mutex_unlock(&adapter->stats64_lock); } static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data) diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c index 5df88ad8ac81..87bdf1604ae2 100644 --- a/drivers/net/ethernet/intel/igb/igb_main.c +++ b/drivers/net/ethernet/intel/igb/igb_main.c @@ -1850,13 +1850,12 @@ static void igb_config_tx_modes(struct igb_adapter *adapter, int queue) * configuration' in respect to these parameters. */ - netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d \ - idleslope %d sendslope %d hiCredit %d \ - locredit %d\n", - (ring->cbs_enable) ? "enabled" : "disabled", - (ring->launchtime_enable) ? 
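
The RXPBS update in igb_setup_tx_mode() below is the standard read-modify-write on a register field; a sketch using the I210 mask and the new 30KB value (0x1E is 30 decimal, in KB units):

#include <stdint.h>

#define I210_RXPBSIZE_MASK    0x0000003Fu
#define I210_RXPBSIZE_PB_30KB 0x0000001Eu

/* clear only the packet-buffer-size field, then set the 30KB value,
 * leaving the remaining RXPBS bits untouched
 */
uint32_t set_rx_pb_30kb(uint32_t rxpbs)
{
	rxpbs &= ~I210_RXPBSIZE_MASK;
	rxpbs |= I210_RXPBSIZE_PB_30KB;
	return rxpbs;
}
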
"enabled" : "disabled", queue, - ring->idleslope, ring->sendslope, ring->hicredit, - ring->locredit); + netdev_dbg(netdev, "Qav Tx mode: cbs %s, launchtime %s, queue %d idleslope %d sendslope %d hiCredit %d locredit %d\n", + ring->cbs_enable ? "enabled" : "disabled", + ring->launchtime_enable ? "enabled" : "disabled", + queue, + ring->idleslope, ring->sendslope, + ring->hicredit, ring->locredit); } static int igb_save_txtime_params(struct igb_adapter *adapter, int queue, @@ -1935,7 +1934,7 @@ static void igb_setup_tx_mode(struct igb_adapter *adapter) val = rd32(E1000_RXPBS); val &= ~I210_RXPBSIZE_MASK; - val |= I210_RXPBSIZE_PB_32KB; + val |= I210_RXPBSIZE_PB_30KB; wr32(E1000_RXPBS, val); /* Section 8.12.9 states that MAX_TPKT_SIZE from DTXMXPKTSZ @@ -2204,9 +2203,9 @@ void igb_down(struct igb_adapter *adapter) del_timer_sync(&adapter->phy_info_timer); /* record the stats before reset*/ - spin_lock(&adapter->stats64_lock); + mutex_lock(&adapter->stats64_lock); igb_update_stats(adapter); - spin_unlock(&adapter->stats64_lock); + mutex_unlock(&adapter->stats64_lock); adapter->link_speed = 0; adapter->link_duplex = 0; @@ -3841,7 +3840,7 @@ static int igb_sw_init(struct igb_adapter *adapter) adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; spin_lock_init(&adapter->nfc_lock); - spin_lock_init(&adapter->stats64_lock); + mutex_init(&adapter->stats64_lock); #ifdef CONFIG_PCI_IOV switch (hw->mac.type) { case e1000_82576: @@ -5407,9 +5406,9 @@ no_wait: } } - spin_lock(&adapter->stats64_lock); + mutex_lock(&adapter->stats64_lock); igb_update_stats(adapter); - spin_unlock(&adapter->stats64_lock); + mutex_unlock(&adapter->stats64_lock); for (i = 0; i < adapter->num_tx_queues; i++) { struct igb_ring *tx_ring = adapter->tx_ring[i]; @@ -6019,6 +6018,8 @@ static int igb_tx_map(struct igb_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). 
@@ -6147,8 +6148,6 @@ netdev_tx_t igb_xmit_frame_ring(struct sk_buff *skb, else if (!tso) igb_tx_csum(tx_ring, first); - skb_tx_timestamp(skb); - if (igb_tx_map(tx_ring, first, hdr_len)) goto cleanup_tx_tstamp; @@ -6236,10 +6235,10 @@ static void igb_get_stats64(struct net_device *netdev, { struct igb_adapter *adapter = netdev_priv(netdev); - spin_lock(&adapter->stats64_lock); + mutex_lock(&adapter->stats64_lock); igb_update_stats(adapter); memcpy(stats, &adapter->stats64, sizeof(*stats)); - spin_unlock(&adapter->stats64_lock); + mutex_unlock(&adapter->stats64_lock); } /** @@ -7753,11 +7752,13 @@ static int igb_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - /* If not enough Rx work done, exit the polling mode */ - napi_complete_done(napi, work_done); - igb_ring_irq_enable(q_vector); + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igb_ring_irq_enable(q_vector); - return 0; + return min(work_done, budget - 1); } /** @@ -8770,9 +8771,11 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake, rtnl_unlock(); #ifdef CONFIG_PM - retval = pci_save_state(pdev); - if (retval) - return retval; + if (!runtime) { + retval = pci_save_state(pdev); + if (retval) + return retval; + } #endif status = rd32(E1000_STATUS); diff --git a/drivers/net/ethernet/intel/igb/igb_ptp.c b/drivers/net/ethernet/intel/igb/igb_ptp.c index 2b95dc9c7a6a..fd3071f55bd3 100644 --- a/drivers/net/ethernet/intel/igb/igb_ptp.c +++ b/drivers/net/ethernet/intel/igb/igb_ptp.c @@ -277,17 +277,53 @@ static int igb_ptp_adjtime_i210(struct ptp_clock_info *ptp, s64 delta) return 0; } -static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, - struct timespec64 *ts) +static int igb_ptp_gettimex_82576(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; unsigned long flags; + u32 lo, hi; u64 ns; spin_lock_irqsave(&igb->tmreg_lock, flags); - ns = timecounter_read(&igb->tc); + ptp_read_system_prets(sts); + lo = rd32(E1000_SYSTIML); + ptp_read_system_postts(sts); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); + + spin_unlock_irqrestore(&igb->tmreg_lock, flags); + + *ts = ns_to_timespec64(ns); + + return 0; +} + +static int igb_ptp_gettimex_82580(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) +{ + struct igb_adapter *igb = container_of(ptp, struct igb_adapter, + ptp_caps); + struct e1000_hw *hw = &igb->hw; + unsigned long flags; + u32 lo, hi; + u64 ns; + + spin_lock_irqsave(&igb->tmreg_lock, flags); + + ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + lo = rd32(E1000_SYSTIML); + hi = rd32(E1000_SYSTIMH); + + ns = timecounter_cyc2time(&igb->tc, ((u64)hi << 32) | lo); spin_unlock_irqrestore(&igb->tmreg_lock, flags); @@ -296,16 +332,22 @@ static int igb_ptp_gettime_82576(struct ptp_clock_info *ptp, return 0; } -static int igb_ptp_gettime_i210(struct ptp_clock_info *ptp, - struct timespec64 *ts) +static int igb_ptp_gettimex_i210(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct igb_adapter *igb = container_of(ptp, struct igb_adapter, ptp_caps); + struct e1000_hw *hw = &igb->hw; unsigned long flags; spin_lock_irqsave(&igb->tmreg_lock, flags); - igb_ptp_read_i210(igb, ts); + 
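
The gettimex64 conversions here bracket the latching device read with system-clock snapshots so the PTP core can correlate the two clocks. A userspace-flavored sketch; read_systiml() and read_systimh() are hypothetical register reads standing in for rd32(E1000_SYSTIML/H):

#include <stdint.h>
#include <time.h>

struct sys_ts {
	struct timespec pre, post;
};

extern uint32_t read_systiml(void);	/* hypothetical; latches the time */
extern uint32_t read_systimh(void);

/* snapshot the system clock immediately before and after the latching
 * read, the same bracketing ptp_read_system_prets/postts() provide
 */
uint64_t gettimex(struct sys_ts *sts)
{
	uint32_t lo, hi;

	clock_gettime(CLOCK_REALTIME, &sts->pre);
	lo = read_systiml();
	clock_gettime(CLOCK_REALTIME, &sts->post);
	hi = read_systimh();

	return ((uint64_t)hi << 32) | lo;
}
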
ptp_read_system_prets(sts); + rd32(E1000_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = rd32(E1000_SYSTIML); + ts->tv_sec = rd32(E1000_SYSTIMH); spin_unlock_irqrestore(&igb->tmreg_lock, flags); @@ -658,9 +700,12 @@ static void igb_ptp_overflow_check(struct work_struct *work) struct igb_adapter *igb = container_of(work, struct igb_adapter, ptp_overflow_work.work); struct timespec64 ts; + u64 ns; - igb->ptp_caps.gettime64(&igb->ptp_caps, &ts); + /* Update the timecounter */ + ns = timecounter_read(&igb->tc); + ts = ns_to_timespec64(ns); pr_debug("igb overflow check at %lld.%09lu\n", (long long) ts.tv_sec, ts.tv_nsec); @@ -1126,7 +1171,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82576; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82576; adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82576; @@ -1145,7 +1190,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_82576; - adapter->ptp_caps.gettime64 = igb_ptp_gettime_82576; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_82580; adapter->ptp_caps.settime64 = igb_ptp_settime_82576; adapter->ptp_caps.enable = igb_ptp_feature_enable; adapter->cc.read = igb_ptp_read_82580; @@ -1173,7 +1218,7 @@ void igb_ptp_init(struct igb_adapter *adapter) adapter->ptp_caps.pin_config = adapter->sdp_config; adapter->ptp_caps.adjfine = igb_ptp_adjfine_82580; adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210; - adapter->ptp_caps.gettime64 = igb_ptp_gettime_i210; + adapter->ptp_caps.gettimex64 = igb_ptp_gettimex_i210; adapter->ptp_caps.settime64 = igb_ptp_settime_i210; adapter->ptp_caps.enable = igb_ptp_feature_enable_i210; adapter->ptp_caps.verify = igb_ptp_verify_pin; diff --git a/drivers/net/ethernet/intel/igbvf/mbx.c b/drivers/net/ethernet/intel/igbvf/mbx.c index 163e5838f7c2..a3cd7ac48d4b 100644 --- a/drivers/net/ethernet/intel/igbvf/mbx.c +++ b/drivers/net/ethernet/intel/igbvf/mbx.c @@ -241,7 +241,7 @@ static s32 e1000_write_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; - WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + lockdep_assert_held(&hw->mbx_lock); /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); @@ -279,7 +279,7 @@ static s32 e1000_read_mbx_vf(struct e1000_hw *hw, u32 *msg, u16 size) s32 err; u16 i; - WARN_ON_ONCE(!spin_is_locked(&hw->mbx_lock)); + lockdep_assert_held(&hw->mbx_lock); /* lock the mailbox to prevent pf/vf race condition */ err = e1000_obtain_mbx_lock_vf(hw); diff --git a/drivers/net/ethernet/intel/igbvf/netdev.c b/drivers/net/ethernet/intel/igbvf/netdev.c index 820d49eb41ab..4eab83faec62 100644 --- a/drivers/net/ethernet/intel/igbvf/netdev.c +++ b/drivers/net/ethernet/intel/igbvf/netdev.c @@ -1186,10 +1186,13 @@ static int igbvf_poll(struct napi_struct *napi, int budget) igbvf_clean_rx_irq(adapter, &work_done, budget); - /* If not enough Rx work done, exit the polling mode */ - if (work_done < budget) { - napi_complete_done(napi, work_done); + if (work_done == budget) + return budget; + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { if (adapter->requested_itr & 3) 
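
The overflow worker now calls timecounter_read() explicitly, so the narrow hardware counter is folded into the 64-bit total before it wraps. A minimal timecounter model (one cycle per nanosecond assumed for simplicity) showing why a periodic read per wrap period suffices:

#include <stdint.h>

struct timecounter {
	uint64_t nsec;		/* accumulated time */
	uint32_t last_cycles;	/* counter value at the last read */
};

/* fold the cycles elapsed since the last read into the 64-bit total;
 * the u32 subtraction is wrap-safe as long as this runs at least once
 * per counter wrap, which is what the overflow worker guarantees
 */
uint64_t timecounter_read_model(struct timecounter *tc, uint32_t cycles_now)
{
	uint32_t delta = cycles_now - tc->last_cycles;

	tc->nsec += delta;
	tc->last_cycles = cycles_now;
	return tc->nsec;
}
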
igbvf_set_itr(adapter); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index cdf18a5d9e08..b1039dd3dd13 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -5,23 +5,12 @@ #define _IGC_H_ #include <linux/kobject.h> - #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> - #include <linux/ethtool.h> - #include <linux/sctp.h> -#define IGC_ERR(args...) pr_err("igc: " args) - -#define PFX "igc: " - -#include <linux/timecounter.h> -#include <linux/net_tstamp.h> -#include <linux/ptp_clock_kernel.h> - #include "igc_hw.h" /* main */ diff --git a/drivers/net/ethernet/intel/igc/igc_base.c b/drivers/net/ethernet/intel/igc/igc_base.c index 832da609d9a7..df40af759542 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.c +++ b/drivers/net/ethernet/intel/igc/igc_base.c @@ -237,7 +237,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) { struct igc_phy_info *phy = &hw->phy; s32 ret_val = 0; - u32 ctrl_ext; if (hw->phy.media_type != igc_media_type_copper) { phy->type = igc_phy_none; @@ -247,8 +246,6 @@ static s32 igc_init_phy_params_base(struct igc_hw *hw) phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT_2500; phy->reset_delay_us = 100; - ctrl_ext = rd32(IGC_CTRL_EXT); - /* set lan id */ hw->bus.func = (rd32(IGC_STATUS) & IGC_STATUS_FUNC_MASK) >> IGC_STATUS_FUNC_SHIFT; @@ -287,8 +284,6 @@ out: static s32 igc_get_invariants_base(struct igc_hw *hw) { struct igc_mac_info *mac = &hw->mac; - u32 link_mode = 0; - u32 ctrl_ext = 0; s32 ret_val = 0; switch (hw->device_id) { @@ -302,9 +297,6 @@ static s32 igc_get_invariants_base(struct igc_hw *hw) hw->phy.media_type = igc_media_type_copper; - ctrl_ext = rd32(IGC_CTRL_EXT); - link_mode = ctrl_ext & IGC_CTRL_EXT_LINK_MODE_MASK; - /* mac initialization and operations */ ret_val = igc_init_mac_params_base(hw); if (ret_val) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index 9d85707e8a81..f20183037fb2 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -865,6 +865,8 @@ static int igc_tx_map(struct igc_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). 
@@ -959,8 +961,6 @@ static netdev_tx_t igc_xmit_frame_ring(struct sk_buff *skb, first->bytecount = skb->len; first->gso_segs = 1; - skb_tx_timestamp(skb); - /* record initial flags and protocol */ first->tx_flags = tx_flags; first->protocol = protocol; @@ -1108,7 +1108,7 @@ static struct sk_buff *igc_build_skb(struct igc_ring *rx_ring, /* update pointers within the skb to store the data */ skb_reserve(skb, IGC_SKB_PAD); - __skb_put(skb, size); + __skb_put(skb, size); /* update buffer offset */ #if (PAGE_SIZE < 8192) @@ -1160,9 +1160,9 @@ static struct sk_buff *igc_construct_skb(struct igc_ring *rx_ring, (va + headlen) - page_address(rx_buffer->page), size, truesize); #if (PAGE_SIZE < 8192) - rx_buffer->page_offset ^= truesize; + rx_buffer->page_offset ^= truesize; #else - rx_buffer->page_offset += truesize; + rx_buffer->page_offset += truesize; #endif } else { rx_buffer->pagecnt_bias++; @@ -1668,8 +1668,8 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) tx_buffer->next_to_watch, jiffies, tx_buffer->next_to_watch->wb.status); - netif_stop_subqueue(tx_ring->netdev, - tx_ring->queue_index); + netif_stop_subqueue(tx_ring->netdev, + tx_ring->queue_index); /* we are about to reset, no point in enabling stuff */ return true; @@ -1700,20 +1700,6 @@ static bool igc_clean_tx_irq(struct igc_q_vector *q_vector, int napi_budget) } /** - * igc_ioctl - I/O control method - * @netdev: network interface device structure - * @ifreq: frequency - * @cmd: command - */ -static int igc_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) -{ - switch (cmd) { - default: - return -EOPNOTSUPP; - } -} - -/** * igc_up - Open the interface and prepare it to handle traffic * @adapter: board private structure */ @@ -2866,11 +2852,13 @@ static int igc_poll(struct napi_struct *napi, int budget) if (!clean_complete) return budget; - /* If not enough Rx work done, exit the polling mode */ - napi_complete_done(napi, work_done); - igc_ring_irq_enable(q_vector); + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) + igc_ring_irq_enable(q_vector); - return 0; + return min(work_done, budget - 1); } /** @@ -3358,7 +3346,7 @@ static int __igc_open(struct net_device *netdev, bool resuming) goto err_req_irq; /* Notify the stack of the actual queue counts. 
*/ - netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); + err = netif_set_real_num_tx_queues(netdev, adapter->num_tx_queues); if (err) goto err_set_queues; @@ -3445,7 +3433,6 @@ static const struct net_device_ops igc_netdev_ops = { .ndo_set_mac_address = igc_set_mac, .ndo_change_mtu = igc_change_mtu, .ndo_get_stats = igc_get_stats, - .ndo_do_ioctl = igc_ioctl, }; /* PCIe configuration access */ @@ -3532,26 +3519,23 @@ static int igc_probe(struct pci_dev *pdev, struct net_device *netdev; struct igc_hw *hw; const struct igc_info *ei = igc_info_tbl[ent->driver_data]; - int err, pci_using_dac; + int err; err = pci_enable_device_mem(pdev); if (err) return err; - pci_using_dac = 0; err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); if (!err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); - if (!err) - pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { - IGC_ERR("Wrong DMA configuration, aborting\n"); + dev_err(&pdev->dev, "igc: Wrong DMA config\n"); goto err_dma; } } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h index 143bdd5ee2a0..08d85e336bd4 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h @@ -12,6 +12,7 @@ #include <linux/aer.h> #include <linux/if_vlan.h> #include <linux/jiffies.h> +#include <linux/phy.h> #include <linux/timecounter.h> #include <linux/net_tstamp.h> @@ -561,6 +562,7 @@ struct ixgbe_adapter { struct net_device *netdev; struct bpf_prog *xdp_prog; struct pci_dev *pdev; + struct mii_bus *mii_bus; unsigned long state; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c index 732b1e6ecc43..acba067cc15a 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c @@ -2206,7 +2206,8 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct ixgbe_adapter *adapter = netdev_priv(netdev); - if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE | + WAKE_FILTER)) return -EOPNOTSUPP; if (ixgbe_wol_exclusion(adapter, wol)) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c index fd1b0546fd67..ff85ce5791a3 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c @@ -4,6 +4,7 @@ #include "ixgbe.h" #include <net/xfrm.h> #include <crypto/aead.h> +#include <linux/if_bridge.h> #define IXGBE_IPSEC_KEY_BITS 160 static const char aes_gcm_name[] = "rfc4106(gcm(aes))"; @@ -693,7 +694,8 @@ static int ixgbe_ipsec_add_sa(struct xfrm_state *xs) } else { struct tx_sa tsa; - if (adapter->num_vfs) + if (adapter->num_vfs && + adapter->bridge_mode != BRIDGE_MODE_VEPA) return -EOPNOTSUPP; /* find the first unused index */ @@ -1063,11 +1065,13 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring, struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev); struct ixgbe_ipsec *ipsec = adapter->ipsec; struct xfrm_state *xs; + struct sec_path *sp; struct tx_sa *tsa; - if (unlikely(!first->skb->sp->len)) { + sp = skb_sec_path(first->skb); + if (unlikely(!sp->len)) { netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", - __func__, first->skb->sp->len); + __func__, sp->len); return 0; } @@ -1157,6 +1161,7 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, 
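
The DMA setup in igc_probe() nearby follows the usual wide-then-narrow mask negotiation; a generic sketch with set_dma_mask() as a hypothetical stand-in for the dma_set_mask()/dma_set_coherent_mask() pair:

#include <stdint.h>

/* hypothetical; returns 0 when the platform accepts the mask */
extern int set_dma_mask(uint64_t mask);

int setup_dma(void)
{
	if (!set_dma_mask(~0ULL))
		return 0;			/* 64-bit DMA available */
	return set_dma_mask(0xFFFFFFFFULL);	/* fall back to 32-bit */
}
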
struct xfrm_state *xs = NULL; struct ipv6hdr *ip6 = NULL; struct iphdr *ip4 = NULL; + struct sec_path *sp; void *daddr; __be32 spi; u8 *c_hdr; @@ -1196,12 +1201,12 @@ void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring, if (unlikely(!xs)) return; - skb->sp = secpath_dup(skb->sp); - if (unlikely(!skb->sp)) + sp = secpath_set(skb); + if (unlikely(!sp)) return; - skb->sp->xvec[skb->sp->len++] = xs; - skb->sp->olen++; + sp->xvec[sp->len++] = xs; + sp->olen++; xo = xfrm_offload(skb); xo->flags = CRYPTO_DONE; xo->status = CRYPTO_SUCCESS; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 113b38e0defb..daff8183534b 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -39,6 +39,7 @@ #include "ixgbe.h" #include "ixgbe_common.h" #include "ixgbe_dcb_82599.h" +#include "ixgbe_phy.h" #include "ixgbe_sriov.h" #include "ixgbe_model.h" #include "ixgbe_txrx_common.h" @@ -6077,9 +6078,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) /* Disable Rx */ ixgbe_disable_rx(adapter); - /* synchronize_sched() needed for pending XDP buffers to drain */ + /* synchronize_rcu() needed for pending XDP buffers to drain */ if (adapter->xdp_ring[0]) - synchronize_sched(); + synchronize_rcu(); ixgbe_irq_disable(adapter); @@ -8269,6 +8270,8 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* * Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered @@ -8646,8 +8649,6 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, } } - skb_tx_timestamp(skb); - #ifdef CONFIG_PCI_IOV /* * Use the l2switch_enable flag - would be false if the DMA @@ -8695,7 +8696,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, #endif /* IXGBE_FCOE */ #ifdef CONFIG_IXGBE_IPSEC - if (skb->sp && !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) + if (secpath_exists(skb) && + !ixgbe_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbe_tso(tx_ring, first, &hdr_len, &ipsec_tx); @@ -8789,6 +8791,15 @@ ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) u16 value; int rc; + if (adapter->mii_bus) { + int regnum = addr; + + if (devad != MDIO_DEVAD_NONE) + regnum |= (devad << 16) | MII_ADDR_C45; + + return mdiobus_read(adapter->mii_bus, prtad, regnum); + } + if (prtad != hw->phy.mdio.prtad) return -EINVAL; rc = hw->phy.ops.read_reg(hw, addr, devad, &value); @@ -8803,6 +8814,15 @@ static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, struct ixgbe_adapter *adapter = netdev_priv(netdev); struct ixgbe_hw *hw = &adapter->hw; + if (adapter->mii_bus) { + int regnum = addr; + + if (devad != MDIO_DEVAD_NONE) + regnum |= (devad << 16) | MII_ADDR_C45; + + return mdiobus_write(adapter->mii_bus, prtad, regnum, value); + } + if (prtad != hw->phy.mdio.prtad) return -EINVAL; return hw->phy.ops.write_reg(hw, addr, devad, value); @@ -9979,7 +9999,8 @@ static int ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter, } static int ixgbe_ndo_bridge_setlink(struct net_device *dev, - struct nlmsghdr *nlh, u16 flags) + struct nlmsghdr *nlh, u16 flags, + struct netlink_ext_ack *extack) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct nlattr *attr, *br_spec; @@ -10191,7 +10212,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, */ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) { #ifdef 
CONFIG_IXGBE_IPSEC - if (!skb->sp) + if (!secpath_exists(skb)) #endif features &= ~NETIF_F_TSO; } @@ -10476,7 +10497,7 @@ void ixgbe_txrx_ring_disable(struct ixgbe_adapter *adapter, int ring) ixgbe_disable_rxr_hw(adapter, rx_ring); if (xdp_ring) - synchronize_sched(); + synchronize_rcu(); /* Rx/Tx/XDP Tx share the same napi context. */ napi_disable(&rx_ring->q_vector->napi); @@ -10517,7 +10538,8 @@ void ixgbe_txrx_ring_enable(struct ixgbe_adapter *adapter, int ring) ixgbe_configure_rx_ring(adapter, rx_ring); clear_bit(__IXGBE_TX_DISABLED, &tx_ring->state); - clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); + if (xdp_ring) + clear_bit(__IXGBE_TX_DISABLED, &xdp_ring->state); } /** @@ -11119,6 +11141,8 @@ skip_sriov: IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, true); + ixgbe_mii_bus_init(hw); + return 0; err_register: @@ -11169,6 +11193,8 @@ static void ixgbe_remove(struct pci_dev *pdev) set_bit(__IXGBE_REMOVING, &adapter->state); cancel_work_sync(&adapter->service_task); + if (adapter->mii_bus) + mdiobus_unregister(adapter->mii_bus); #ifdef CONFIG_IXGBE_DCA if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c index 919a7af84b42..cc4907f9ff02 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c @@ -3,6 +3,7 @@ #include <linux/pci.h> #include <linux/delay.h> +#include <linux/iopoll.h> #include <linux/sched.h> #include "ixgbe.h" @@ -658,6 +659,304 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, return status; } +#define IXGBE_HW_READ_REG(addr) IXGBE_READ_REG(hw, addr) + +/** + * ixgbe_msca_cmd - Write the command register and poll for completion/timeout + * @hw: pointer to hardware structure + * @cmd: command register value to write + **/ +static s32 ixgbe_msca_cmd(struct ixgbe_hw *hw, u32 cmd) +{ + IXGBE_WRITE_REG(hw, IXGBE_MSCA, cmd); + + return readx_poll_timeout(IXGBE_HW_READ_REG, IXGBE_MSCA, cmd, + !(cmd & IXGBE_MSCA_MDI_COMMAND), 10, + 10 * IXGBE_MDIO_COMMAND_TIMEOUT); +} + +/** + * ixgbe_mii_bus_read_generic - Read a clause 22/45 register with gssr flags + * @hw: pointer to hardware structure + * @addr: address + * @regnum: register number + * @gssr: semaphore flags to acquire + **/ +static s32 ixgbe_mii_bus_read_generic(struct ixgbe_hw *hw, int addr, + int regnum, u32 gssr) +{ + u32 hwaddr, cmd; + s32 data; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) + return -EBUSY; + + hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; + if (regnum & MII_ADDR_C45) { + hwaddr |= regnum & GENMASK(21, 0); + cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; + } else { + hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; + cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | + IXGBE_MSCA_READ_AUTOINC | IXGBE_MSCA_MDI_COMMAND; + } + + data = ixgbe_msca_cmd(hw, cmd); + if (data < 0) + goto mii_bus_read_done; + + /* For a clause 45 access the address cycle just completed, we still + * need to do the read command, otherwise just get the data + */ + if (!(regnum & MII_ADDR_C45)) + goto do_mii_bus_read; + + cmd = hwaddr | IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND; + data = ixgbe_msca_cmd(hw, cmd); + if (data < 0) + goto mii_bus_read_done; + +do_mii_bus_read: + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data = (data >> IXGBE_MSRWD_READ_DATA_SHIFT) & GENMASK(16, 0); + +mii_bus_read_done: + hw->mac.ops.release_swfw_sync(hw, gssr); + return data; +} + +/** + * ixgbe_mii_bus_write_generic - Write a clause 22/45 register 
with gssr flags + * @hw: pointer to hardware structure + * @addr: address + * @regnum: register number + * @val: value to write + * @gssr: semaphore flags to acquire + **/ +static s32 ixgbe_mii_bus_write_generic(struct ixgbe_hw *hw, int addr, + int regnum, u16 val, u32 gssr) +{ + u32 hwaddr, cmd; + s32 err; + + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) + return -EBUSY; + + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)val); + + hwaddr = addr << IXGBE_MSCA_PHY_ADDR_SHIFT; + if (regnum & MII_ADDR_C45) { + hwaddr |= regnum & GENMASK(21, 0); + cmd = hwaddr | IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND; + } else { + hwaddr |= (regnum & GENMASK(5, 0)) << IXGBE_MSCA_DEV_TYPE_SHIFT; + cmd = hwaddr | IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + } + + /* For clause 45 this is an address cycle, for clause 22 this is the + * entire transaction + */ + err = ixgbe_msca_cmd(hw, cmd); + if (err < 0 || !(regnum & MII_ADDR_C45)) + goto mii_bus_write_done; + + cmd = hwaddr | IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND; + err = ixgbe_msca_cmd(hw, cmd); + +mii_bus_write_done: + hw->mac.ops.release_swfw_sync(hw, gssr); + return err; +} + +/** + * ixgbe_mii_bus_read - Read a clause 22/45 register + * @bus: pointer to mii_bus structure + * @addr: address + * @regnum: register number + **/ +static s32 ixgbe_mii_bus_read(struct mii_bus *bus, int addr, int regnum) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); +} + +/** + * ixgbe_mii_bus_write - Write a clause 22/45 register + * @bus: pointer to mii_bus structure + * @addr: address + * @regnum: register number + * @val: value to write + **/ +static s32 ixgbe_mii_bus_write(struct mii_bus *bus, int addr, int regnum, + u16 val) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); +} + +/** + * ixgbe_x550em_a_mii_bus_read - Read a clause 22/45 register on x550em_a + * @bus: pointer to mii_bus structure + * @addr: address + * @regnum: register number + **/ +static s32 ixgbe_x550em_a_mii_bus_read(struct mii_bus *bus, int addr, + int regnum) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; + return ixgbe_mii_bus_read_generic(hw, addr, regnum, gssr); +} + +/** + * ixgbe_x550em_a_mii_bus_write - Write a clause 22/45 register on x550em_a + * @bus: pointer to mii_bus structure + * @addr: address + * @regnum: register number + * @val: value to write + **/ +static s32 ixgbe_x550em_a_mii_bus_write(struct mii_bus *bus, int addr, + int regnum, u16 val) +{ + struct ixgbe_adapter *adapter = bus->priv; + struct ixgbe_hw *hw = &adapter->hw; + u32 gssr = hw->phy.phy_semaphore_mask; + + gssr |= IXGBE_GSSR_TOKEN_SM | IXGBE_GSSR_PHY0_SM; + return ixgbe_mii_bus_write_generic(hw, addr, regnum, val, gssr); +} + +/** + * ixgbe_get_first_secondary_devfn - get first device downstream of root port + * @devfn: PCI_DEVFN of root port on domain 0, bus 0 + * + * Returns pci_dev pointer to PCI_DEVFN(0, 0) on subordinate side of root + * on domain 0, bus 0, devfn = 'devfn' + **/ +static struct pci_dev *ixgbe_get_first_secondary_devfn(unsigned int devfn) +{ + struct pci_dev *rp_pdev; + int bus; + + rp_pdev = pci_get_domain_bus_and_slot(0, 0, devfn); + if (rp_pdev &&
rp_pdev->subordinate) { + bus = rp_pdev->subordinate->number; + return pci_get_domain_bus_and_slot(0, bus, 0); + } + + return NULL; +} + +/** + * ixgbe_x550em_a_has_mii - is this the first ixgbe x550em_a PCI function? + * @hw: pointer to hardware structure + * + * Returns true if hw points to lowest numbered PCI B:D.F x550_em_a device in + * the SoC. There are up to 4 MACs sharing a single MDIO bus on the x550em_a, + * but we only want to register one MDIO bus. + **/ +static bool ixgbe_x550em_a_has_mii(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct pci_dev *func0_pdev; + + /* For the C3000 family of SoCs (x550em_a) the internal ixgbe devices + * are always downstream of root ports @ 0000:00:16.0 & 0000:00:17.0 + * It's not valid for function 0 to be disabled and function 1 is up, + * so the lowest numbered ixgbe dev will be device 0 function 0 on one + * of those two root ports + */ + func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x16, 0)); + if (func0_pdev) { + if (func0_pdev == pdev) + return true; + else + return false; + } + func0_pdev = ixgbe_get_first_secondary_devfn(PCI_DEVFN(0x17, 0)); + if (func0_pdev == pdev) + return true; + + return false; +} + +/** + * ixgbe_mii_bus_init - mii_bus structure setup + * @hw: pointer to hardware structure + * + * Returns 0 on success, negative on failure + * + * ixgbe_mii_bus_init initializes a mii_bus structure in adapter + **/ +s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw) +{ + struct ixgbe_adapter *adapter = hw->back; + struct pci_dev *pdev = adapter->pdev; + struct device *dev = &adapter->netdev->dev; + struct mii_bus *bus; + + adapter->mii_bus = devm_mdiobus_alloc(dev); + if (!adapter->mii_bus) + return -ENOMEM; + + bus = adapter->mii_bus; + + switch (hw->device_id) { + /* C3000 SoCs */ + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + case IXGBE_DEV_ID_X550EM_A_10G_T: + case IXGBE_DEV_ID_X550EM_A_SFP: + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + if (!ixgbe_x550em_a_has_mii(hw)) + goto ixgbe_no_mii_bus; + bus->read = &ixgbe_x550em_a_mii_bus_read; + bus->write = &ixgbe_x550em_a_mii_bus_write; + break; + default: + bus->read = &ixgbe_mii_bus_read; + bus->write = &ixgbe_mii_bus_write; + break; + } + + /* Use the position of the device in the PCI hierarchy as the id */ + snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mdio-%s", ixgbe_driver_name, + pci_name(pdev)); + + bus->name = "ixgbe-mdio"; + bus->priv = adapter; + bus->parent = dev; + bus->phy_mask = GENMASK(31, 0); + + /* Support clause 22/45 natively. ixgbe_probe() sets MDIO_EMULATE_C22 + * unfortunately that causes some clause 22 frames to be sent with + * clause 45 addressing. We don't want that. 
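For orientation, the clause-45 register-number encoding that this mdiobus path relies on can be sketched in isolation. The helper name below is illustrative only; the encoding itself (the devad in bits 20:16 plus the MII_ADDR_C45 flag) is exactly what the new ixgbe_mdio_read()/ixgbe_mdio_write() wrappers in ixgbe_main.c build before calling mdiobus_read()/mdiobus_write():

#include <linux/mdio.h>
#include <linux/phy.h>

/* Illustrative sketch: build the regnum the mdiobus layer expects for a
 * clause-45 access, mirroring the ixgbe ndo wrappers in this patch.
 */
static int example_c45_regnum(int devad, u16 reg)
{
	int regnum = reg;

	if (devad != MDIO_DEVAD_NONE)
		regnum |= (devad << 16) | MII_ADDR_C45;

	return regnum;	/* pass as mdiobus_read(bus, prtad, regnum) */
}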
+ */ + hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_SUPPORTS_C22; + + return mdiobus_register(bus); + +ixgbe_no_mii_bus: + devm_mdiobus_free(dev, bus); + adapter->mii_bus = NULL; + return -ENODEV; +} + /** * ixgbe_setup_phy_link_generic - Set and restart autoneg * @hw: pointer to hardware structure diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h index 64e44e01c973..214b01085718 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h @@ -120,6 +120,8 @@ /* SFP+ SFF-8472 Compliance code */ #define IXGBE_SFF_SFF_8472_UNSUP 0x00 +s32 ixgbe_mii_bus_init(struct ixgbe_hw *hw); + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c index b3e0d8bb5cbd..d81a50dc9535 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c @@ -443,22 +443,52 @@ static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) } /** - * ixgbe_ptp_gettime + * ixgbe_ptp_gettimex * @ptp: the ptp clock structure - * @ts: timespec structure to hold the current time value + * @ts: timespec to hold the PHC timestamp + * @sts: structure to hold the system time before and after reading the PHC * * read the timecounter and return the correct value on ns, * after converting it into a struct timespec. */ -static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int ixgbe_ptp_gettimex(struct ptp_clock_info *ptp, + struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct ixgbe_adapter *adapter = container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; unsigned long flags; - u64 ns; + u64 ns, stamp; spin_lock_irqsave(&adapter->tmreg_lock, flags); - ns = timecounter_read(&adapter->hw_tc); + + switch (adapter->hw.mac.type) { + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_x550em_a: + /* Upper 32 bits represent billions of cycles, lower 32 bits + * represent cycles. However, we use timespec64_to_ns for the + * correct math even though the units haven't been corrected + * yet. 
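The switch from gettime64 to gettimex64 in the ixgbe_ptp.c hunk here is about precision: the callback brackets the PHC register read with system-clock snapshots so user space can correlate device time and system time without guessing at the latency of the read. A minimal sketch of the pattern, with the hardware access stubbed out as hypothetical helpers (example_read_systim() and example_cycles_to_ns() are not real ixgbe functions):

#include <linux/ptp_clock_kernel.h>

extern u64 example_read_systim(void);		/* hypothetical PHC read */
extern u64 example_cycles_to_ns(u64 cycles);	/* hypothetical conversion */

static int example_gettimex64(struct ptp_clock_info *ptp,
			      struct timespec64 *ts,
			      struct ptp_system_timestamp *sts)
{
	u64 cycles;

	ptp_read_system_prets(sts);	/* system time just before the read */
	cycles = example_read_systim();
	ptp_read_system_postts(sts);	/* system time just after the read */

	*ts = ns_to_timespec64(example_cycles_to_ns(cycles));
	return 0;
}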
+ */ + ptp_read_system_prets(sts); + IXGBE_READ_REG(hw, IXGBE_SYSTIMR); + ptp_read_system_postts(sts); + ts->tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ts->tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); + stamp = timespec64_to_ns(ts); + break; + default: + ptp_read_system_prets(sts); + stamp = IXGBE_READ_REG(hw, IXGBE_SYSTIML); + ptp_read_system_postts(sts); + stamp |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; + break; + } + + ns = timecounter_cyc2time(&adapter->hw_tc, stamp); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); *ts = ns_to_timespec64(ns); @@ -567,10 +597,14 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) { bool timeout = time_is_before_jiffies(adapter->last_overflow_check + IXGBE_OVERFLOW_PERIOD); - struct timespec64 ts; + unsigned long flags; if (timeout) { - ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); + /* Update the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); + timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + adapter->last_overflow_check = jiffies; } } @@ -1216,7 +1250,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 1; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_x540; @@ -1233,7 +1267,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; break; @@ -1249,7 +1283,7 @@ static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) adapter->ptp_caps.pps = 0; adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; - adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime; + adapter->ptp_caps.gettimex64 = ixgbe_ptp_gettimex; adapter->ptp_caps.settime64 = ixgbe_ptp_settime; adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; adapter->ptp_setup_sdp = NULL; diff --git a/drivers/net/ethernet/intel/ixgbevf/ipsec.c b/drivers/net/ethernet/intel/ixgbevf/ipsec.c index e8a3231be0bf..5170dd9d8705 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ipsec.c +++ b/drivers/net/ethernet/intel/ixgbevf/ipsec.c @@ -450,12 +450,14 @@ int ixgbevf_ipsec_tx(struct ixgbevf_ring *tx_ring, struct ixgbevf_adapter *adapter = netdev_priv(tx_ring->netdev); struct ixgbevf_ipsec *ipsec = adapter->ipsec; struct xfrm_state *xs; + struct sec_path *sp; struct tx_sa *tsa; u16 sa_idx; - if (unlikely(!first->skb->sp->len)) { + sp = skb_sec_path(first->skb); + if (unlikely(!sp->len)) { netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n", - __func__, first->skb->sp->len); + __func__, sp->len); return 0; } @@ -546,6 +548,7 @@ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, struct xfrm_state *xs = NULL; struct ipv6hdr *ip6 = NULL; struct iphdr *ip4 = NULL; + struct sec_path *sp; void *daddr; __be32 spi; u8 *c_hdr; @@ -585,12 +588,12 @@ void ixgbevf_ipsec_rx(struct ixgbevf_ring *rx_ring, if (unlikely(!xs)) return; - skb->sp = secpath_dup(skb->sp); - if (unlikely(!skb->sp)) + sp = secpath_set(skb); 
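Both the ixgbe and ixgbevf receive paths in this series move from secpath_dup(), which replaced skb->sp with a fresh copy, to secpath_set(), which attaches (or reuses) the secpath skb extension and returns a pointer to it. The resulting idiom in miniature, assuming an xfrm_state *xs has already been looked up:

	struct sec_path *sp;

	sp = secpath_set(skb);		/* allocate the SP extension if absent */
	if (unlikely(!sp))
		return;			/* allocation failed; leave skb untouched */

	sp->xvec[sp->len++] = xs;	/* record the offloaded state */
	sp->olen++;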
+ if (unlikely(!sp)) return; - skb->sp->xvec[skb->sp->len++] = xs; - skb->sp->olen++; + sp->xvec[sp->len++] = xs; + sp->olen++; xo = xfrm_offload(skb); xo->flags = CRYPTO_DONE; xo->status = CRYPTO_SUCCESS; diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c index 5e47ede7e832..49e23afa05a2 100644 --- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c +++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c @@ -1293,16 +1293,20 @@ static int ixgbevf_poll(struct napi_struct *napi, int budget) /* If all work not completed, return budget and keep polling */ if (!clean_complete) return budget; - /* all work done, exit the polling mode */ - napi_complete_done(napi, work_done); - if (adapter->rx_itr_setting == 1) - ixgbevf_set_itr(q_vector); - if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && - !test_bit(__IXGBEVF_REMOVING, &adapter->state)) - ixgbevf_irq_enable_queues(adapter, - BIT(q_vector->v_idx)); - return 0; + /* Exit the polling mode, but don't re-enable interrupts if stack might + * poll us due to busy-polling + */ + if (likely(napi_complete_done(napi, work_done))) { + if (adapter->rx_itr_setting == 1) + ixgbevf_set_itr(q_vector); + if (!test_bit(__IXGBEVF_DOWN, &adapter->state) && + !test_bit(__IXGBEVF_REMOVING, &adapter->state)) + ixgbevf_irq_enable_queues(adapter, + BIT(q_vector->v_idx)); + } + + return min(work_done, budget - 1); } /** @@ -4016,6 +4020,8 @@ static void ixgbevf_tx_map(struct ixgbevf_ring *tx_ring, /* set the timestamp */ first->time_stamp = jiffies; + skb_tx_timestamp(skb); + /* Force memory writes to complete before letting h/w know there * are new descriptors to fetch. (Only applicable for weak-ordered * memory model archs, such as IA-64). @@ -4151,7 +4157,7 @@ static int ixgbevf_xmit_frame_ring(struct sk_buff *skb, first->protocol = vlan_get_protocol(skb); #ifdef CONFIG_IXGBEVF_IPSEC - if (skb->sp && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) + if (secpath_exists(skb) && !ixgbevf_ipsec_tx(tx_ring, first, &ipsec_tx)) goto out_drop; #endif tso = ixgbevf_tso(tx_ring, first, &hdr_len, &ipsec_tx); diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 1e9bcbdc6a90..2f427271a793 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -1499,23 +1499,16 @@ mv643xx_eth_get_link_ksettings_phy(struct mv643xx_eth_private *mp, struct ethtool_link_ksettings *cmd) { struct net_device *dev = mp->dev; - u32 supported, advertising; phy_ethtool_ksettings_get(dev->phydev, cmd); /* * The MAC does not support 1000baseT_Half. 
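The mv643xx_eth hunk completed just below drops the round trip through the legacy u32 mode masks and manipulates the ethtool link-mode bitmaps directly. The same idiom in isolation (assuming <linux/linkmode.h>):

	/* old: convert to u32, mask, convert back; new: one helper call */
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.supported);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
			   cmd->link_modes.advertising);

	/* and for building an advertising mask from scratch */
	linkmode_copy(phy->advertising, phy->supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phy->advertising);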
*/ - ethtool_convert_link_mode_to_legacy_u32(&supported, - cmd->link_modes.supported); - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); - supported &= ~SUPPORTED_1000baseT_Half; - advertising &= ~ADVERTISED_1000baseT_Half; - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - supported); - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.advertising); return 0; } @@ -3031,10 +3024,12 @@ static void phy_init(struct mv643xx_eth_private *mp, int speed, int duplex) phy->autoneg = AUTONEG_ENABLE; phy->speed = 0; phy->duplex = 0; - phy->advertising = phy->supported | ADVERTISED_Autoneg; + linkmode_copy(phy->advertising, phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phy->advertising); } else { phy->autoneg = AUTONEG_DISABLE; - phy->advertising = 0; + linkmode_zero(phy->advertising); phy->speed = speed; phy->duplex = duplex; } diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index 61b23497f836..9d4568eb2297 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -4248,8 +4248,7 @@ static int mvneta_ethtool_set_eee(struct net_device *dev, /* The Armada 37x documents do not give limits for this other than * it being an 8-bit register. */ - if (eee->tx_lpi_enabled && - (eee->tx_lpi_timer < 0 || eee->tx_lpi_timer > 255)) + if (eee->tx_lpi_enabled && eee->tx_lpi_timer > 255) return -EINVAL; lpi_ctl0 = mvreg_read(pp, MVNETA_LPI_CTRL_0); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c index 12db256c8c9f..742f0c1f60df 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.c @@ -31,6 +31,7 @@ * @resp: command response * @link_info: link related information * @event_cb: callback for linkchange events + * @event_cb_lock: lock for serializing callback with unregister * @cmd_pend: flag set before new command is started * flag cleared after command response is received * @cgx: parent cgx port @@ -43,6 +44,7 @@ struct lmac { u64 resp; struct cgx_link_user_info link_info; struct cgx_event_cb event_cb; + spinlock_t event_cb_lock; bool cmd_pend; struct cgx *cgx; u8 lmac_id; @@ -55,6 +57,8 @@ struct cgx { u8 cgx_id; u8 lmac_count; struct lmac *lmac_idmap[MAX_LMAC_PER_CGX]; + struct work_struct cgx_cmd_work; + struct workqueue_struct *cgx_cmd_workq; struct list_head cgx_list; }; @@ -66,6 +70,9 @@ static u32 cgx_speed_mbps[CGX_LINK_SPEED_MAX]; /* Convert firmware lmac type encoding to string */ static char *cgx_lmactype_string[LMAC_MODE_MAX]; +/* CGX PHY management internal APIs */ +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool en); + /* Supported devices */ static const struct pci_device_id cgx_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_CGX) }, @@ -92,17 +99,21 @@ static inline struct lmac *lmac_pdata(u8 lmac_id, struct cgx *cgx) return cgx->lmac_idmap[lmac_id]; } -int cgx_get_cgx_cnt(void) +int cgx_get_cgxcnt_max(void) { struct cgx *cgx_dev; - int count = 0; + int idmax = -ENODEV; list_for_each_entry(cgx_dev, &cgx_list, cgx_list) - count++; + if (cgx_dev->cgx_id > idmax) + idmax = cgx_dev->cgx_id; + + if (idmax < 0) + return 0; - return count; + return idmax + 1; } 
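The cgx hunks that follow close a race between the firmware link-change interrupt and callback unregistration: cgx_link_change_handler() now invokes the callback under the new per-lmac event_cb_lock, and cgx_lmac_evh_unregister() clears the callback pointers under the same lock, so a notifier can never run against a half-torn-down consumer. The shape of the pattern:

	/* notifier side (interrupt context) */
	spin_lock(&lmac->event_cb_lock);
	if (lmac->event_cb.notify_link_chg)
		lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data);
	spin_unlock(&lmac->event_cb_lock);

	/* unregister side */
	spin_lock_irqsave(&lmac->event_cb_lock, flags);
	lmac->event_cb.notify_link_chg = NULL;
	lmac->event_cb.data = NULL;
	spin_unlock_irqrestore(&lmac->event_cb_lock, flags);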
-EXPORT_SYMBOL(cgx_get_cgx_cnt); +EXPORT_SYMBOL(cgx_get_cgxcnt_max); int cgx_get_lmac_cnt(void *cgxd) { @@ -445,6 +456,9 @@ static inline void cgx_link_change_handler(u64 lstat, lmac->link_info = event.link_uinfo; linfo = &lmac->link_info; + /* Ensure callback doesn't get unregistered until we finish it */ + spin_lock(&lmac->event_cb_lock); + if (!lmac->event_cb.notify_link_chg) { dev_dbg(dev, "cgx port %d:%d Link change handler null", cgx->cgx_id, lmac->lmac_id); @@ -455,11 +469,13 @@ static inline void cgx_link_change_handler(u64 lstat, dev_info(dev, "cgx port %d:%d Link is %s %d Mbps\n", cgx->cgx_id, lmac->lmac_id, linfo->link_up ? "UP" : "DOWN", linfo->speed); - return; + goto err; } if (lmac->event_cb.notify_link_chg(&event, lmac->event_cb.data)) dev_err(dev, "event notification failure\n"); +err: + spin_unlock(&lmac->event_cb_lock); } static inline bool cgx_cmdresp_is_linkevent(u64 event) @@ -482,6 +498,60 @@ static inline bool cgx_event_is_linkevent(u64 event) return false; } +static inline int cgx_fwi_get_mkex_prfl_sz(u64 *prfl_sz, + struct cgx *cgx) +{ + u64 req = 0; + u64 resp; + int err; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_SIZE, req); + err = cgx_fwi_cmd_generic(req, &resp, cgx, 0); + if (!err) + *prfl_sz = FIELD_GET(RESP_MKEX_PRFL_SIZE, resp); + + return err; +} + +static inline int cgx_fwi_get_mkex_prfl_addr(u64 *prfl_addr, + struct cgx *cgx) +{ + u64 req = 0; + u64 resp; + int err; + + req = FIELD_SET(CMDREG_ID, CGX_CMD_GET_MKEX_PRFL_ADDR, req); + err = cgx_fwi_cmd_generic(req, &resp, cgx, 0); + if (!err) + *prfl_addr = FIELD_GET(RESP_MKEX_PRFL_ADDR, resp); + + return err; +} + +int cgx_get_mkex_prfl_info(u64 *addr, u64 *size) +{ + struct cgx *cgx_dev; + int err; + + if (!addr || !size) + return -EINVAL; + + cgx_dev = list_first_entry(&cgx_list, struct cgx, cgx_list); + if (!cgx_dev) + return -ENXIO; + + err = cgx_fwi_get_mkex_prfl_sz(size, cgx_dev); + if (err) + return -EIO; + + err = cgx_fwi_get_mkex_prfl_addr(addr, cgx_dev); + if (err) + return -EIO; + + return 0; +} +EXPORT_SYMBOL(cgx_get_mkex_prfl_info); + static irqreturn_t cgx_fwi_event_handler(int irq, void *data) { struct lmac *lmac = data; @@ -548,6 +618,38 @@ int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id) } EXPORT_SYMBOL(cgx_lmac_evh_register); +int cgx_lmac_evh_unregister(void *cgxd, int lmac_id) +{ + struct lmac *lmac; + unsigned long flags; + struct cgx *cgx = cgxd; + + lmac = lmac_pdata(lmac_id, cgx); + if (!lmac) + return -ENODEV; + + spin_lock_irqsave(&lmac->event_cb_lock, flags); + lmac->event_cb.notify_link_chg = NULL; + lmac->event_cb.data = NULL; + spin_unlock_irqrestore(&lmac->event_cb_lock, flags); + + return 0; +} +EXPORT_SYMBOL(cgx_lmac_evh_unregister); + +static int cgx_fwi_link_change(struct cgx *cgx, int lmac_id, bool enable) +{ + u64 req = 0; + u64 resp; + + if (enable) + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_UP, req); + else + req = FIELD_SET(CMDREG_ID, CGX_CMD_LINK_BRING_DOWN, req); + + return cgx_fwi_cmd_generic(req, &resp, cgx, lmac_id); +} + static inline int cgx_fwi_read_version(u64 *resp, struct cgx *cgx) { u64 req = 0; @@ -581,6 +683,34 @@ static int cgx_lmac_verify_fwi_version(struct cgx *cgx) return 0; } +static void cgx_lmac_linkup_work(struct work_struct *work) +{ + struct cgx *cgx = container_of(work, struct cgx, cgx_cmd_work); + struct device *dev = &cgx->pdev->dev; + int i, err; + + /* Do Link up for all the lmacs */ + for (i = 0; i < cgx->lmac_count; i++) { + err = cgx_fwi_link_change(cgx, i, true); + if (err) + dev_info(dev, 
"cgx port %d:%d Link up command failed\n", + cgx->cgx_id, i); + } +} + +int cgx_lmac_linkup_start(void *cgxd) +{ + struct cgx *cgx = cgxd; + + if (!cgx) + return -ENODEV; + + queue_work(cgx->cgx_cmd_workq, &cgx->cgx_cmd_work); + + return 0; +} +EXPORT_SYMBOL(cgx_lmac_linkup_start); + static int cgx_lmac_init(struct cgx *cgx) { struct lmac *lmac; @@ -602,6 +732,7 @@ static int cgx_lmac_init(struct cgx *cgx) lmac->cgx = cgx; init_waitqueue_head(&lmac->wq_cmd_cmplt); mutex_init(&lmac->cmd_lock); + spin_lock_init(&lmac->event_cb_lock); err = request_irq(pci_irq_vector(cgx->pdev, CGX_LMAC_FWI + i * 9), cgx_fwi_event_handler, 0, lmac->name, lmac); @@ -624,6 +755,12 @@ static int cgx_lmac_exit(struct cgx *cgx) struct lmac *lmac; int i; + if (cgx->cgx_cmd_workq) { + flush_workqueue(cgx->cgx_cmd_workq); + destroy_workqueue(cgx->cgx_cmd_workq); + cgx->cgx_cmd_workq = NULL; + } + /* Free all lmac related resources */ for (i = 0; i < cgx->lmac_count; i++) { lmac = cgx->lmac_idmap[i]; @@ -679,8 +816,19 @@ static int cgx_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_release_regions; } + cgx->cgx_id = (pci_resource_start(pdev, PCI_CFG_REG_BAR_NUM) >> 24) + & CGX_ID_MASK; + + /* init wq for processing linkup requests */ + INIT_WORK(&cgx->cgx_cmd_work, cgx_lmac_linkup_work); + cgx->cgx_cmd_workq = alloc_workqueue("cgx_cmd_workq", 0, 0); + if (!cgx->cgx_cmd_workq) { + dev_err(dev, "alloc workqueue failed for cgx cmd"); + err = -ENOMEM; + goto err_release_regions; + } + list_add(&cgx->cgx_list, &cgx_list); - cgx->cgx_id = cgx_get_cgx_cnt() - 1; cgx_link_usertable_init(); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h index 0a66d2717442..206dc5dc1df8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx.h @@ -20,40 +20,41 @@ /* PCI BAR nos */ #define PCI_CFG_REG_BAR_NUM 0 -#define MAX_CGX 3 +#define CGX_ID_MASK 0x7 #define MAX_LMAC_PER_CGX 4 +#define CGX_FIFO_LEN 65536 /* 64K for both Rx & Tx */ #define CGX_OFFSET(x) ((x) * MAX_LMAC_PER_CGX) /* Registers */ #define CGXX_CMRX_CFG 0x00 -#define CMR_EN BIT_ULL(55) -#define DATA_PKT_TX_EN BIT_ULL(53) -#define DATA_PKT_RX_EN BIT_ULL(54) -#define CGX_LMAC_TYPE_SHIFT 40 -#define CGX_LMAC_TYPE_MASK 0xF +#define CMR_EN BIT_ULL(55) +#define DATA_PKT_TX_EN BIT_ULL(53) +#define DATA_PKT_RX_EN BIT_ULL(54) +#define CGX_LMAC_TYPE_SHIFT 40 +#define CGX_LMAC_TYPE_MASK 0xF #define CGXX_CMRX_INT 0x040 -#define FW_CGX_INT BIT_ULL(1) +#define FW_CGX_INT BIT_ULL(1) #define CGXX_CMRX_INT_ENA_W1S 0x058 #define CGXX_CMRX_RX_ID_MAP 0x060 #define CGXX_CMRX_RX_STAT0 0x070 #define CGXX_CMRX_RX_LMACS 0x128 #define CGXX_CMRX_RX_DMAC_CTL0 0x1F8 -#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) -#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) -#define CGX_DMAC_MCAST_MODE BIT_ULL(1) -#define CGX_DMAC_BCAST_MODE BIT_ULL(0) +#define CGX_DMAC_CTL0_CAM_ENABLE BIT_ULL(3) +#define CGX_DMAC_CAM_ACCEPT BIT_ULL(3) +#define CGX_DMAC_MCAST_MODE BIT_ULL(1) +#define CGX_DMAC_BCAST_MODE BIT_ULL(0) #define CGXX_CMRX_RX_DMAC_CAM0 0x200 -#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) +#define CGX_DMAC_CAM_ADDR_ENABLE BIT_ULL(48) #define CGXX_CMRX_RX_DMAC_CAM1 0x400 -#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) +#define CGX_RX_DMAC_ADR_MASK GENMASK_ULL(47, 0) #define CGXX_CMRX_TX_STAT0 0x700 #define CGXX_SCRATCH0_REG 0x1050 #define CGXX_SCRATCH1_REG 0x1058 #define CGX_CONST 0x2000 #define CGXX_SPUX_CONTROL1 0x10000 -#define CGXX_SPUX_CONTROL1_LBK BIT_ULL(14) +#define 
CGXX_SPUX_CONTROL1_LBK BIT_ULL(14) #define CGXX_GMP_PCS_MRX_CTL 0x30000 -#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14) +#define CGXX_GMP_PCS_MRX_CTL_LBK BIT_ULL(14) #define CGX_COMMAND_REG CGXX_SCRATCH1_REG #define CGX_EVENT_REG CGXX_SCRATCH0_REG @@ -94,11 +95,12 @@ struct cgx_event_cb { extern struct pci_driver cgx_driver; -int cgx_get_cgx_cnt(void); +int cgx_get_cgxcnt_max(void); int cgx_get_lmac_cnt(void *cgxd); void *cgx_get_pdata(int cgx_id); int cgx_set_pkind(void *cgxd, u8 lmac_id, int pkind); int cgx_lmac_evh_register(struct cgx_event_cb *cb, void *cgxd, int lmac_id); +int cgx_lmac_evh_unregister(void *cgxd, int lmac_id); int cgx_get_tx_stats(void *cgxd, int lmac_id, int idx, u64 *tx_stat); int cgx_get_rx_stats(void *cgxd, int lmac_id, int idx, u64 *rx_stat); int cgx_lmac_rx_tx_enable(void *cgxd, int lmac_id, bool enable); @@ -108,4 +110,6 @@ void cgx_lmac_promisc_config(int cgx_id, int lmac_id, bool enable); int cgx_lmac_internal_loopback(void *cgxd, int lmac_id, bool enable); int cgx_get_link_info(void *cgxd, int lmac_id, struct cgx_link_user_info *linfo); +int cgx_lmac_linkup_start(void *cgxd); +int cgx_get_mkex_prfl_info(u64 *addr, u64 *size); #endif /* CGX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h index fa17af3f4ba7..fb3ba4968a9b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/cgx_fw_if.h @@ -78,8 +78,8 @@ enum cgx_cmd_id { CGX_CMD_LINK_STATE_CHANGE, CGX_CMD_MODE_CHANGE, /* hot plug support */ CGX_CMD_INTF_SHUTDOWN, - CGX_CMD_IRQ_ENABLE, - CGX_CMD_IRQ_DISABLE, + CGX_CMD_GET_MKEX_PRFL_SIZE, + CGX_CMD_GET_MKEX_PRFL_ADDR }; /* async event ids */ @@ -139,6 +139,16 @@ enum cgx_cmd_own { */ #define RESP_MAC_ADDR GENMASK_ULL(56, 9) +/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_SIZE with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MKEX_PRFL_SIZE GENMASK_ULL(63, 9) + +/* Response to cmd ID as CGX_CMD_GET_MKEX_PRFL_ADDR with cmd status as + * CGX_STAT_SUCCESS + */ +#define RESP_MKEX_PRFL_ADDR GENMASK_ULL(63, 9) + /* Response to cmd ID - CGX_CMD_LINK_BRING_UP/DOWN, event ID CGX_EVT_LINK_CHANGE * status can be either CGX_STAT_FAIL or CGX_STAT_SUCCESS * diff --git a/drivers/net/ethernet/marvell/octeontx2/af/common.h b/drivers/net/ethernet/marvell/octeontx2/af/common.h index d39ada404c8f..ec50a21c5aaf 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/common.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/common.h @@ -143,6 +143,14 @@ enum nix_scheduler { NIX_TXSCH_LVL_CNT = 0x5, }; +#define TXSCH_TL1_DFLT_RR_QTM ((1 << 24) - 1) +#define TXSCH_TL1_DFLT_RR_PRIO (0x1ull) + +/* Min/Max packet sizes, excluding FCS */ +#define NIC_HW_MIN_FRS 40 +#define NIC_HW_MAX_FRS 9212 +#define SDP_HW_MAX_FRS 65535 + /* NIX RX action operation*/ #define NIX_RX_ACTIONOP_DROP (0x0ull) #define NIX_RX_ACTIONOP_UCAST (0x1ull) @@ -169,7 +177,9 @@ enum nix_scheduler { #define MAX_LMAC_PKIND 12 #define NIX_LINK_CGX_LMAC(a, b) (0 + 4 * (a) + (b)) +#define NIX_LINK_LBK(a) (12 + (a)) #define NIX_CHAN_CGX_LMAC_CHX(a, b, c) (0x800 + 0x100 * (a) + 0x10 * (b) + (c)) +#define NIX_CHAN_LBK_CHX(a, b) (0 + 0x100 * (a) + (b)) /* NIX LSO format indices. * As of now TSO is the only one using, so statically assigning indices. 
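The mbox rework below adds a lower-case handler-name field to every entry of the message X-macro, so a single list now generates the ID enum, the id-to-name table and the dispatch switch. The technique in miniature (EXAMPLE_MESSAGES and its entries are illustrative; the expansion pattern matches mbox.h/mbox.c):

#define EXAMPLE_MESSAGES						\
M(READY,	0x001, ready,	    msg_req, ready_msg_rsp)		\
M(MSIX_OFFSET,	0x004, msix_offset, msg_req, msix_offset_rsp)

/* expansion 1: enum of message IDs */
enum {
#define M(_name, _id, _fn_name, _req_type, _rsp_type) MBOX_MSG_ ## _name = _id,
EXAMPLE_MESSAGES
#undef M
};

/* expansion 2: ID-to-name lookup, as in otx2_mbox_id2name() */
static const char *example_id2name(u16 id)
{
	switch (id) {
#define M(_name, _id, _fn_name, _req_type, _rsp_type) case _id: return # _name;
	EXAMPLE_MESSAGES
#undef M
	default:
		return "INVALID ID";
	}
}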
@@ -186,26 +196,4 @@ enum nix_scheduler { #define DEFAULT_RSS_CONTEXT_GROUP 0 #define MAX_RSS_INDIR_TBL_SIZE 256 /* 1 << Max adder bits */ -/* NIX flow tag, key type flags */ -#define FLOW_KEY_TYPE_PORT BIT(0) -#define FLOW_KEY_TYPE_IPV4 BIT(1) -#define FLOW_KEY_TYPE_IPV6 BIT(2) -#define FLOW_KEY_TYPE_TCP BIT(3) -#define FLOW_KEY_TYPE_UDP BIT(4) -#define FLOW_KEY_TYPE_SCTP BIT(5) - -/* NIX flow tag algorithm indices, max is 31 */ -enum { - FLOW_KEY_ALG_PORT, - FLOW_KEY_ALG_IP, - FLOW_KEY_ALG_TCP, - FLOW_KEY_ALG_UDP, - FLOW_KEY_ALG_SCTP, - FLOW_KEY_ALG_TCP_UDP, - FLOW_KEY_ALG_TCP_SCTP, - FLOW_KEY_ALG_UDP_SCTP, - FLOW_KEY_ALG_TCP_UDP_SCTP, - FLOW_KEY_ALG_MAX, -}; - #endif /* COMMON_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c index 85ba24a05774..d6f9ed8ea966 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.c @@ -290,7 +290,7 @@ EXPORT_SYMBOL(otx2_mbox_nonempty); const char *otx2_mbox_id2name(u16 id) { switch (id) { -#define M(_name, _id, _1, _2) case _id: return # _name; +#define M(_name, _id, _1, _2, _3) case _id: return # _name; MBOX_MESSAGES #undef M default: diff --git a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h index a15a59c9a239..76a4575d18ff 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/mbox.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/mbox.h @@ -120,54 +120,101 @@ static inline struct mbox_msghdr *otx2_mbox_alloc_msg(struct otx2_mbox *mbox, #define MBOX_MESSAGES \ /* Generic mbox IDs (range 0x000 - 0x1FF) */ \ -M(READY, 0x001, msg_req, ready_msg_rsp) \ -M(ATTACH_RESOURCES, 0x002, rsrc_attach, msg_rsp) \ -M(DETACH_RESOURCES, 0x003, rsrc_detach, msg_rsp) \ -M(MSIX_OFFSET, 0x004, msg_req, msix_offset_rsp) \ +M(READY, 0x001, ready, msg_req, ready_msg_rsp) \ +M(ATTACH_RESOURCES, 0x002, attach_resources, rsrc_attach, msg_rsp) \ +M(DETACH_RESOURCES, 0x003, detach_resources, rsrc_detach, msg_rsp) \ +M(MSIX_OFFSET, 0x004, msix_offset, msg_req, msix_offset_rsp) \ +M(VF_FLR, 0x006, vf_flr, msg_req, msg_rsp) \ /* CGX mbox IDs (range 0x200 - 0x3FF) */ \ -M(CGX_START_RXTX, 0x200, msg_req, msg_rsp) \ -M(CGX_STOP_RXTX, 0x201, msg_req, msg_rsp) \ -M(CGX_STATS, 0x202, msg_req, cgx_stats_rsp) \ -M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set_or_get, \ +M(CGX_START_RXTX, 0x200, cgx_start_rxtx, msg_req, msg_rsp) \ +M(CGX_STOP_RXTX, 0x201, cgx_stop_rxtx, msg_req, msg_rsp) \ +M(CGX_STATS, 0x202, cgx_stats, msg_req, cgx_stats_rsp) \ +M(CGX_MAC_ADDR_SET, 0x203, cgx_mac_addr_set, cgx_mac_addr_set_or_get, \ cgx_mac_addr_set_or_get) \ -M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_set_or_get, \ +M(CGX_MAC_ADDR_GET, 0x204, cgx_mac_addr_get, cgx_mac_addr_set_or_get, \ cgx_mac_addr_set_or_get) \ -M(CGX_PROMISC_ENABLE, 0x205, msg_req, msg_rsp) \ -M(CGX_PROMISC_DISABLE, 0x206, msg_req, msg_rsp) \ -M(CGX_START_LINKEVENTS, 0x207, msg_req, msg_rsp) \ -M(CGX_STOP_LINKEVENTS, 0x208, msg_req, msg_rsp) \ -M(CGX_GET_LINKINFO, 0x209, msg_req, cgx_link_info_msg) \ -M(CGX_INTLBK_ENABLE, 0x20A, msg_req, msg_rsp) \ -M(CGX_INTLBK_DISABLE, 0x20B, msg_req, msg_rsp) \ +M(CGX_PROMISC_ENABLE, 0x205, cgx_promisc_enable, msg_req, msg_rsp) \ +M(CGX_PROMISC_DISABLE, 0x206, cgx_promisc_disable, msg_req, msg_rsp) \ +M(CGX_START_LINKEVENTS, 0x207, cgx_start_linkevents, msg_req, msg_rsp) \ +M(CGX_STOP_LINKEVENTS, 0x208, cgx_stop_linkevents, msg_req, msg_rsp) \ +M(CGX_GET_LINKINFO, 0x209, cgx_get_linkinfo, msg_req, cgx_link_info_msg) \ 
+M(CGX_INTLBK_ENABLE, 0x20A, cgx_intlbk_enable, msg_req, msg_rsp) \ +M(CGX_INTLBK_DISABLE, 0x20B, cgx_intlbk_disable, msg_req, msg_rsp) \ /* NPA mbox IDs (range 0x400 - 0x5FF) */ \ -M(NPA_LF_ALLOC, 0x400, npa_lf_alloc_req, npa_lf_alloc_rsp) \ -M(NPA_LF_FREE, 0x401, msg_req, msg_rsp) \ -M(NPA_AQ_ENQ, 0x402, npa_aq_enq_req, npa_aq_enq_rsp) \ -M(NPA_HWCTX_DISABLE, 0x403, hwctx_disable_req, msg_rsp) \ +M(NPA_LF_ALLOC, 0x400, npa_lf_alloc, \ + npa_lf_alloc_req, npa_lf_alloc_rsp) \ +M(NPA_LF_FREE, 0x401, npa_lf_free, msg_req, msg_rsp) \ +M(NPA_AQ_ENQ, 0x402, npa_aq_enq, npa_aq_enq_req, npa_aq_enq_rsp) \ +M(NPA_HWCTX_DISABLE, 0x403, npa_hwctx_disable, hwctx_disable_req, msg_rsp)\ /* SSO/SSOW mbox IDs (range 0x600 - 0x7FF) */ \ /* TIM mbox IDs (range 0x800 - 0x9FF) */ \ /* CPT mbox IDs (range 0xA00 - 0xBFF) */ \ /* NPC mbox IDs (range 0x6000 - 0x7FFF) */ \ +M(NPC_MCAM_ALLOC_ENTRY, 0x6000, npc_mcam_alloc_entry, npc_mcam_alloc_entry_req,\ + npc_mcam_alloc_entry_rsp) \ +M(NPC_MCAM_FREE_ENTRY, 0x6001, npc_mcam_free_entry, \ + npc_mcam_free_entry_req, msg_rsp) \ +M(NPC_MCAM_WRITE_ENTRY, 0x6002, npc_mcam_write_entry, \ + npc_mcam_write_entry_req, msg_rsp) \ +M(NPC_MCAM_ENA_ENTRY, 0x6003, npc_mcam_ena_entry, \ + npc_mcam_ena_dis_entry_req, msg_rsp) \ +M(NPC_MCAM_DIS_ENTRY, 0x6004, npc_mcam_dis_entry, \ + npc_mcam_ena_dis_entry_req, msg_rsp) \ +M(NPC_MCAM_SHIFT_ENTRY, 0x6005, npc_mcam_shift_entry, npc_mcam_shift_entry_req,\ + npc_mcam_shift_entry_rsp) \ +M(NPC_MCAM_ALLOC_COUNTER, 0x6006, npc_mcam_alloc_counter, \ + npc_mcam_alloc_counter_req, \ + npc_mcam_alloc_counter_rsp) \ +M(NPC_MCAM_FREE_COUNTER, 0x6007, npc_mcam_free_counter, \ + npc_mcam_oper_counter_req, msg_rsp) \ +M(NPC_MCAM_UNMAP_COUNTER, 0x6008, npc_mcam_unmap_counter, \ + npc_mcam_unmap_counter_req, msg_rsp) \ +M(NPC_MCAM_CLEAR_COUNTER, 0x6009, npc_mcam_clear_counter, \ + npc_mcam_oper_counter_req, msg_rsp) \ +M(NPC_MCAM_COUNTER_STATS, 0x600a, npc_mcam_counter_stats, \ + npc_mcam_oper_counter_req, \ + npc_mcam_oper_counter_rsp) \ +M(NPC_MCAM_ALLOC_AND_WRITE_ENTRY, 0x600b, npc_mcam_alloc_and_write_entry, \ + npc_mcam_alloc_and_write_entry_req, \ + npc_mcam_alloc_and_write_entry_rsp) \ +M(NPC_GET_KEX_CFG, 0x600c, npc_get_kex_cfg, \ + msg_req, npc_get_kex_cfg_rsp) \ /* NIX mbox IDs (range 0x8000 - 0xFFFF) */ \ -M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc_req, nix_lf_alloc_rsp) \ -M(NIX_LF_FREE, 0x8001, msg_req, msg_rsp) \ -M(NIX_AQ_ENQ, 0x8002, nix_aq_enq_req, nix_aq_enq_rsp) \ -M(NIX_HWCTX_DISABLE, 0x8003, hwctx_disable_req, msg_rsp) \ -M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ -M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free_req, msg_rsp) \ -M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_config, msg_rsp) \ -M(NIX_STATS_RST, 0x8007, msg_req, msg_rsp) \ -M(NIX_VTAG_CFG, 0x8008, nix_vtag_config, msg_rsp) \ -M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, msg_rsp) \ -M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, msg_rsp) \ -M(NIX_SET_RX_MODE, 0x800b, nix_rx_mode, msg_rsp) +M(NIX_LF_ALLOC, 0x8000, nix_lf_alloc, \ + nix_lf_alloc_req, nix_lf_alloc_rsp) \ +M(NIX_LF_FREE, 0x8001, nix_lf_free, msg_req, msg_rsp) \ +M(NIX_AQ_ENQ, 0x8002, nix_aq_enq, nix_aq_enq_req, nix_aq_enq_rsp) \ +M(NIX_HWCTX_DISABLE, 0x8003, nix_hwctx_disable, \ + hwctx_disable_req, msg_rsp) \ +M(NIX_TXSCH_ALLOC, 0x8004, nix_txsch_alloc, \ + nix_txsch_alloc_req, nix_txsch_alloc_rsp) \ +M(NIX_TXSCH_FREE, 0x8005, nix_txsch_free, nix_txsch_free_req, msg_rsp) \ +M(NIX_TXSCHQ_CFG, 0x8006, nix_txschq_cfg, nix_txschq_config, msg_rsp) \ +M(NIX_STATS_RST, 0x8007, nix_stats_rst, 
msg_req, msg_rsp) \ +M(NIX_VTAG_CFG, 0x8008, nix_vtag_cfg, nix_vtag_config, msg_rsp) \ +M(NIX_RSS_FLOWKEY_CFG, 0x8009, nix_rss_flowkey_cfg, \ + nix_rss_flowkey_cfg, \ + nix_rss_flowkey_cfg_rsp) \ +M(NIX_SET_MAC_ADDR, 0x800a, nix_set_mac_addr, nix_set_mac_addr, msg_rsp) \ +M(NIX_SET_RX_MODE, 0x800b, nix_set_rx_mode, nix_rx_mode, msg_rsp) \ +M(NIX_SET_HW_FRS, 0x800c, nix_set_hw_frs, nix_frs_cfg, msg_rsp) \ +M(NIX_LF_START_RX, 0x800d, nix_lf_start_rx, msg_req, msg_rsp) \ +M(NIX_LF_STOP_RX, 0x800e, nix_lf_stop_rx, msg_req, msg_rsp) \ +M(NIX_MARK_FORMAT_CFG, 0x800f, nix_mark_format_cfg, \ + nix_mark_format_cfg, \ + nix_mark_format_cfg_rsp) \ +M(NIX_SET_RX_CFG, 0x8010, nix_set_rx_cfg, nix_rx_cfg, msg_rsp) \ +M(NIX_LSO_FORMAT_CFG, 0x8011, nix_lso_format_cfg, \ + nix_lso_format_cfg, \ + nix_lso_format_cfg_rsp) \ +M(NIX_RXVLAN_ALLOC, 0x8012, nix_rxvlan_alloc, msg_req, msg_rsp) /* Messages initiated by AF (range 0xC00 - 0xDFF) */ #define MBOX_UP_CGX_MESSAGES \ -M(CGX_LINK_EVENT, 0xC00, cgx_link_info_msg, msg_rsp) +M(CGX_LINK_EVENT, 0xC00, cgx_link_event, cgx_link_info_msg, msg_rsp) enum { -#define M(_name, _id, _1, _2) MBOX_MSG_ ## _name = _id, +#define M(_name, _id, _1, _2, _3) MBOX_MSG_ ## _name = _id, MBOX_MESSAGES MBOX_UP_CGX_MESSAGES #undef M @@ -191,6 +238,13 @@ struct msg_rsp { struct mbox_msghdr hdr; }; +/* RVU mailbox error codes + * Range 256 - 300. + */ +enum rvu_af_status { + RVU_INVALID_VF_ID = -256, +}; + struct ready_msg_rsp { struct mbox_msghdr hdr; u16 sclk_feq; /* SCLK frequency */ @@ -347,6 +401,8 @@ struct hwctx_disable_req { u8 ctype; }; +/* NIX mbox message formats */ + /* NIX mailbox error codes * Range 401 - 500. */ @@ -365,6 +421,12 @@ enum nix_af_status { NIX_AF_INVAL_TXSCHQ_CFG = -412, NIX_AF_SMQ_FLUSH_FAILED = -413, NIX_AF_ERR_LF_RESET = -414, + NIX_AF_ERR_RSS_NOSPC_FIELD = -415, + NIX_AF_ERR_RSS_NOSPC_ALGO = -416, + NIX_AF_ERR_MARK_CFG_FAIL = -417, + NIX_AF_ERR_LSO_CFG_FAIL = -418, + NIX_AF_INVAL_NPA_PF_FUNC = -419, + NIX_AF_INVAL_SSO_PF_FUNC = -420, }; /* For NIX LF context alloc and init */ @@ -392,6 +454,10 @@ struct nix_lf_alloc_rsp { u8 lso_tsov4_idx; u8 lso_tsov6_idx; u8 mac_addr[ETH_ALEN]; + u8 lf_rx_stats; /* NIX_AF_CONST1::LF_RX_STATS */ + u8 lf_tx_stats; /* NIX_AF_CONST1::LF_TX_STATS */ + u16 cints; /* NIX_AF_CONST2::CINTS */ + u16 qints; /* NIX_AF_CONST2::QINTS */ }; /* NIX AQ enqueue msg */ @@ -472,6 +538,7 @@ struct nix_txschq_config { struct nix_vtag_config { struct mbox_msghdr hdr; + /* '0' for 4 octet VTAG, '1' for 8 octet VTAG */ u8 vtag_size; /* cfg_type is '0' for tx vlan cfg * cfg_type is '1' for rx vlan cfg @@ -492,7 +559,7 @@ struct nix_vtag_config { /* valid when cfg_type is '1' */ struct { - /* rx vtag type index */ + /* rx vtag type index, valid values are in 0..7 range */ u8 vtag_type; /* rx vtag strip */ u8 strip_vtag :1; @@ -505,15 +572,40 @@ struct nix_vtag_config { struct nix_rss_flowkey_cfg { struct mbox_msghdr hdr; int mcam_index; /* MCAM entry index to modify */ +#define NIX_FLOW_KEY_TYPE_PORT BIT(0) +#define NIX_FLOW_KEY_TYPE_IPV4 BIT(1) +#define NIX_FLOW_KEY_TYPE_IPV6 BIT(2) +#define NIX_FLOW_KEY_TYPE_TCP BIT(3) +#define NIX_FLOW_KEY_TYPE_UDP BIT(4) +#define NIX_FLOW_KEY_TYPE_SCTP BIT(5) u32 flowkey_cfg; /* Flowkey types selected */ u8 group; /* RSS context or group */ }; +struct nix_rss_flowkey_cfg_rsp { + struct mbox_msghdr hdr; + u8 alg_idx; /* Selected algo index */ +}; + struct nix_set_mac_addr { struct mbox_msghdr hdr; u8 mac_addr[ETH_ALEN]; /* MAC address to be set for this pcifunc */ }; +struct nix_mark_format_cfg { + struct 
mbox_msghdr hdr; + u8 offset; + u8 y_mask; + u8 y_val; + u8 r_mask; + u8 r_val; +}; + +struct nix_mark_format_cfg_rsp { + struct mbox_msghdr hdr; + u8 mark_format_idx; +}; + struct nix_rx_mode { struct mbox_msghdr hdr; #define NIX_RX_MODE_UCAST BIT(0) @@ -522,4 +614,182 @@ struct nix_rx_mode { u16 mode; }; +struct nix_rx_cfg { + struct mbox_msghdr hdr; +#define NIX_RX_OL3_VERIFY BIT(0) +#define NIX_RX_OL4_VERIFY BIT(1) + u8 len_verify; /* Outer L3/L4 len check */ +#define NIX_RX_CSUM_OL4_VERIFY BIT(0) + u8 csum_verify; /* Outer L4 checksum verification */ +}; + +struct nix_frs_cfg { + struct mbox_msghdr hdr; + u8 update_smq; /* Update SMQ's min/max lens */ + u8 update_minlen; /* Set minlen also */ + u8 sdp_link; /* Set SDP RX link */ + u16 maxlen; + u16 minlen; +}; + +struct nix_lso_format_cfg { + struct mbox_msghdr hdr; + u64 field_mask; +#define NIX_LSO_FIELD_MAX 8 + u64 fields[NIX_LSO_FIELD_MAX]; +}; + +struct nix_lso_format_cfg_rsp { + struct mbox_msghdr hdr; + u8 lso_format_idx; +}; + +/* NPC mbox message structs */ + +#define NPC_MCAM_ENTRY_INVALID 0xFFFF +#define NPC_MCAM_INVALID_MAP 0xFFFF + +/* NPC mailbox error codes + * Range 701 - 800. + */ +enum npc_af_status { + NPC_MCAM_INVALID_REQ = -701, + NPC_MCAM_ALLOC_DENIED = -702, + NPC_MCAM_ALLOC_FAILED = -703, + NPC_MCAM_PERM_DENIED = -704, +}; + +struct npc_mcam_alloc_entry_req { + struct mbox_msghdr hdr; +#define NPC_MAX_NONCONTIG_ENTRIES 256 + u8 contig; /* Contiguous entries ? */ +#define NPC_MCAM_ANY_PRIO 0 +#define NPC_MCAM_LOWER_PRIO 1 +#define NPC_MCAM_HIGHER_PRIO 2 + u8 priority; /* Lower or higher w.r.t ref_entry */ + u16 ref_entry; + u16 count; /* Number of entries requested */ +}; + +struct npc_mcam_alloc_entry_rsp { + struct mbox_msghdr hdr; + u16 entry; /* Entry allocated or start index if contiguous. + * Invalid incase of non-contiguous. + */ + u16 count; /* Number of entries allocated */ + u16 free_count; /* Number of entries available */ + u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; +}; + +struct npc_mcam_free_entry_req { + struct mbox_msghdr hdr; + u16 entry; /* Entry index to be freed */ + u8 all; /* If all entries allocated to this PFVF to be freed */ +}; + +struct mcam_entry { +#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */ + u64 kw[NPC_MAX_KWS_IN_KEY]; + u64 kw_mask[NPC_MAX_KWS_IN_KEY]; + u64 action; + u64 vtag_action; +}; + +struct npc_mcam_write_entry_req { + struct mbox_msghdr hdr; + struct mcam_entry entry_data; + u16 entry; /* MCAM entry to write this match key */ + u16 cntr; /* Counter for this MCAM entry */ + u8 intf; /* Rx or Tx interface */ + u8 enable_entry;/* Enable this MCAM entry ? */ + u8 set_cntr; /* Set counter for this entry ? */ +}; + +/* Enable/Disable a given entry */ +struct npc_mcam_ena_dis_entry_req { + struct mbox_msghdr hdr; + u16 entry; +}; + +struct npc_mcam_shift_entry_req { + struct mbox_msghdr hdr; +#define NPC_MCAM_MAX_SHIFTS 64 + u16 curr_entry[NPC_MCAM_MAX_SHIFTS]; + u16 new_entry[NPC_MCAM_MAX_SHIFTS]; + u16 shift_count; /* Number of entries to shift */ +}; + +struct npc_mcam_shift_entry_rsp { + struct mbox_msghdr hdr; + u16 failed_entry_idx; /* Index in 'curr_entry', not entry itself */ +}; + +struct npc_mcam_alloc_counter_req { + struct mbox_msghdr hdr; + u8 contig; /* Contiguous counters ? */ +#define NPC_MAX_NONCONTIG_COUNTERS 64 + u16 count; /* Number of counters requested */ +}; + +struct npc_mcam_alloc_counter_rsp { + struct mbox_msghdr hdr; + u16 cntr; /* Counter allocated or start index if contiguous. + * Invalid incase of non-contiguous. 
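As a usage illustration for the request layout above (the mbox transport itself is omitted and cur_entry is a hypothetical existing index), a PF asking for four contiguous MCAM entries placed above a known entry would fill:

	struct npc_mcam_alloc_entry_req req = { 0 };

	req.contig = 1;				/* one contiguous block */
	req.count = 4;
	req.priority = NPC_MCAM_HIGHER_PRIO;	/* relative to ref_entry */
	req.ref_entry = cur_entry;		/* hypothetical anchor entry */

	/* On success, rsp.entry holds the start index of the block;
	 * rsp.entry_list[] is only meaningful for non-contiguous requests.
	 */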
+ */ + u16 count; /* Number of counters allocated */ + u16 cntr_list[NPC_MAX_NONCONTIG_COUNTERS]; +}; + +struct npc_mcam_oper_counter_req { + struct mbox_msghdr hdr; + u16 cntr; /* Free a counter or clear/fetch it's stats */ +}; + +struct npc_mcam_oper_counter_rsp { + struct mbox_msghdr hdr; + u64 stat; /* valid only while fetching counter's stats */ +}; + +struct npc_mcam_unmap_counter_req { + struct mbox_msghdr hdr; + u16 cntr; + u16 entry; /* Entry and counter to be unmapped */ + u8 all; /* Unmap all entries using this counter ? */ +}; + +struct npc_mcam_alloc_and_write_entry_req { + struct mbox_msghdr hdr; + struct mcam_entry entry_data; + u16 ref_entry; + u8 priority; /* Lower or higher w.r.t ref_entry */ + u8 intf; /* Rx or Tx interface */ + u8 enable_entry;/* Enable this MCAM entry ? */ + u8 alloc_cntr; /* Allocate counter and map ? */ +}; + +struct npc_mcam_alloc_and_write_entry_rsp { + struct mbox_msghdr hdr; + u16 entry; + u16 cntr; +}; + +struct npc_get_kex_cfg_rsp { + struct mbox_msghdr hdr; + u64 rx_keyx_cfg; /* NPC_AF_INTF(0)_KEX_CFG */ + u64 tx_keyx_cfg; /* NPC_AF_INTF(1)_KEX_CFG */ +#define NPC_MAX_INTF 2 +#define NPC_MAX_LID 8 +#define NPC_MAX_LT 16 +#define NPC_MAX_LD 2 +#define NPC_MAX_LFL 16 + /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */ + u64 kex_ld_flags[NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */ + u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */ + u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL]; +#define MKEX_NAME_LEN 128 + u8 mkex_pfl_name[MKEX_NAME_LEN]; +}; + #endif /* MBOX_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/npc.h b/drivers/net/ethernet/marvell/octeontx2/af/npc.h index f98b0113def3..8d6d90fdfb73 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/npc.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/npc.h @@ -259,4 +259,28 @@ struct nix_rx_action { #endif }; +/* NIX Receive Vtag Action Structure */ +#define VTAG0_VALID_BIT BIT_ULL(15) +#define VTAG0_TYPE_MASK GENMASK_ULL(14, 12) +#define VTAG0_LID_MASK GENMASK_ULL(10, 8) +#define VTAG0_RELPTR_MASK GENMASK_ULL(7, 0) + +struct npc_mcam_kex { + /* MKEX Profle Header */ + u64 mkex_sign; /* "mcam-kex-profile" (8 bytes/ASCII characters) */ + u8 name[MKEX_NAME_LEN]; /* MKEX Profile name */ + u64 cpu_model; /* Format as profiled by CPU hardware */ + u64 kpu_version; /* KPU firmware/profile version */ + u64 reserved; /* Reserved for extension */ + + /* MKEX Profle Data */ + u64 keyx_cfg[NPC_MAX_INTF]; /* NPC_AF_INTF(0..1)_KEX_CFG */ + /* NPC_AF_KEX_LDATA(0..1)_FLAGS_CFG */ + u64 kex_ld_flags[NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LID(0..7)_LT(0..15)_LD(0..1)_CFG */ + u64 intf_lid_lt_ld[NPC_MAX_INTF][NPC_MAX_LID][NPC_MAX_LT][NPC_MAX_LD]; + /* NPC_AF_INTF(0..1)_LDATA(0..1)_FLAGS(0..15)_CFG */ + u64 intf_ld_flags[NPC_MAX_INTF][NPC_MAX_LD][NPC_MAX_LFL]; +} __packed; + #endif /* NPC_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index dc28fa2b9481..e581091c09c4 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -29,6 +29,16 @@ static void rvu_set_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf); static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, struct rvu_block *block, int lf); +static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc); + +static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info 
*mw, + int type, int num, + void (mbox_handler)(struct work_struct *), + void (mbox_up_handler)(struct work_struct *)); +enum { + TYPE_AFVF, + TYPE_AFPF, +}; /* Supported devices */ static const struct pci_device_id rvu_id_table[] = { @@ -42,6 +52,10 @@ MODULE_LICENSE("GPL v2"); MODULE_VERSION(DRV_VERSION); MODULE_DEVICE_TABLE(pci, rvu_id_table); +static char *mkex_profile; /* MKEX profile name */ +module_param(mkex_profile, charp, 0000); +MODULE_PARM_DESC(mkex_profile, "MKEX profile name string"); + /* Poll a RVU block's register 'offset', for a 'zero' * or 'nonzero' at bits specified by 'mask' */ @@ -153,17 +167,17 @@ int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot) u16 match = 0; int lf; - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); for (lf = 0; lf < block->lf.max; lf++) { if (block->fn_map[lf] == pcifunc) { if (slot == match) { - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return lf; } match++; } } - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return -ENODEV; } @@ -337,6 +351,28 @@ struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc) return &rvu->pf[rvu_get_pf(pcifunc)]; } +static bool is_pf_func_valid(struct rvu *rvu, u16 pcifunc) +{ + int pf, vf, nvfs; + u64 cfg; + + pf = rvu_get_pf(pcifunc); + if (pf >= rvu->hw->total_pfs) + return false; + + if (!(pcifunc & RVU_PFVF_FUNC_MASK)) + return true; + + /* Check if VF is within number of VFs attached to this PF */ + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + nvfs = (cfg >> 12) & 0xFF; + if (vf >= nvfs) + return false; + + return true; +} + bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr) { struct rvu_block *block; @@ -597,6 +633,8 @@ static void rvu_free_hw_resources(struct rvu *rvu) dma_unmap_resource(rvu->dev, rvu->msix_base_iova, max_msix * PCI_MSIX_ENTRY_SIZE, DMA_BIDIRECTIONAL, 0); + + mutex_destroy(&rvu->rsrc_lock); } static int rvu_setup_hw_resources(struct rvu *rvu) @@ -752,7 +790,7 @@ init: if (!rvu->hwvf) return -ENOMEM; - spin_lock_init(&rvu->rsrc_lock); + mutex_init(&rvu->rsrc_lock); err = rvu_setup_msix_resources(rvu); if (err) @@ -777,17 +815,26 @@ init: err = rvu_npc_init(rvu); if (err) - return err; + goto exit; + + err = rvu_cgx_init(rvu); + if (err) + goto exit; err = rvu_npa_init(rvu); if (err) - return err; + goto cgx_err; err = rvu_nix_init(rvu); if (err) - return err; + goto cgx_err; return 0; + +cgx_err: + rvu_cgx_exit(rvu); +exit: + return err; } /* NPA and NIX admin queue APIs */ @@ -830,7 +877,7 @@ int rvu_aq_alloc(struct rvu *rvu, struct admin_queue **ad_queue, return 0; } -static int rvu_mbox_handler_READY(struct rvu *rvu, struct msg_req *req, +static int rvu_mbox_handler_ready(struct rvu *rvu, struct msg_req *req, struct ready_msg_rsp *rsp) { return 0; @@ -858,6 +905,22 @@ static u16 rvu_get_rsrc_mapcount(struct rvu_pfvf *pfvf, int blktype) return 0; } +bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype) +{ + struct rvu_pfvf *pfvf; + + if (!is_pf_func_valid(rvu, pcifunc)) + return false; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + + /* Check if this PFFUNC has a LF of type blktype attached */ + if (!rvu_get_rsrc_mapcount(pfvf, blktype)) + return false; + + return true; +} + static int rvu_lookup_rsrc(struct rvu *rvu, struct rvu_block *block, int pcifunc, int slot) { @@ -926,7 +989,7 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, struct rvu_block *block; int blkid; - spin_lock(&rvu->rsrc_lock); + 
mutex_lock(&rvu->rsrc_lock); /* Check for partial resource detach */ if (detach && detach->partial) @@ -956,11 +1019,11 @@ static int rvu_detach_rsrcs(struct rvu *rvu, struct rsrc_detach *detach, rvu_detach_block(rvu, pcifunc, block->type); } - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return 0; } -static int rvu_mbox_handler_DETACH_RESOURCES(struct rvu *rvu, +static int rvu_mbox_handler_detach_resources(struct rvu *rvu, struct rsrc_detach *detach, struct msg_rsp *rsp) { @@ -1108,7 +1171,7 @@ fail: return -ENOSPC; } -static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu, +static int rvu_mbox_handler_attach_resources(struct rvu *rvu, struct rsrc_attach *attach, struct msg_rsp *rsp) { @@ -1119,7 +1182,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu, if (!attach->modify) rvu_detach_rsrcs(rvu, NULL, pcifunc); - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); /* Check if the request can be accommodated */ err = rvu_check_rsrc_availability(rvu, attach, pcifunc); @@ -1163,7 +1226,7 @@ static int rvu_mbox_handler_ATTACH_RESOURCES(struct rvu *rvu, } exit: - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return err; } @@ -1231,7 +1294,7 @@ static void rvu_clear_msix_offset(struct rvu *rvu, struct rvu_pfvf *pfvf, rvu_free_rsrc_contig(&pfvf->msix, nvecs, offset); } -static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req, +static int rvu_mbox_handler_msix_offset(struct rvu *rvu, struct msg_req *req, struct msix_offset_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -1280,22 +1343,51 @@ static int rvu_mbox_handler_MSIX_OFFSET(struct rvu *rvu, struct msg_req *req, return 0; } -static int rvu_process_mbox_msg(struct rvu *rvu, int devid, +static int rvu_mbox_handler_vf_flr(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + u16 vf, numvfs; + u64 cfg; + + vf = pcifunc & RVU_PFVF_FUNC_MASK; + cfg = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_CFG(rvu_get_pf(pcifunc))); + numvfs = (cfg >> 12) & 0xFF; + + if (vf && vf <= numvfs) + __rvu_flr_handler(rvu, pcifunc); + else + return RVU_INVALID_VF_ID; + + return 0; +} + +static int rvu_process_mbox_msg(struct otx2_mbox *mbox, int devid, struct mbox_msghdr *req) { + struct rvu *rvu = pci_get_drvdata(mbox->pdev); + /* Check if valid, if not reply with a invalid msg */ if (req->sig != OTX2_MBOX_REQ_SIG) goto bad_message; switch (req->id) { -#define M(_name, _id, _req_type, _rsp_type) \ +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ case _id: { \ struct _rsp_type *rsp; \ int err; \ \ rsp = (struct _rsp_type *)otx2_mbox_alloc_msg( \ - &rvu->mbox, devid, \ + mbox, devid, \ sizeof(struct _rsp_type)); \ + /* some handlers should complete even if reply */ \ + /* could not be allocated */ \ + if (!rsp && \ + _id != MBOX_MSG_DETACH_RESOURCES && \ + _id != MBOX_MSG_NIX_TXSCH_FREE && \ + _id != MBOX_MSG_VF_FLR) \ + return -ENOMEM; \ if (rsp) { \ rsp->hdr.id = _id; \ rsp->hdr.sig = OTX2_MBOX_RSP_SIG; \ @@ -1303,9 +1395,9 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid, rsp->hdr.rc = 0; \ } \ \ - err = rvu_mbox_handler_ ## _name(rvu, \ - (struct _req_type *)req, \ - rsp); \ + err = rvu_mbox_handler_ ## _fn_name(rvu, \ + (struct _req_type *)req, \ + rsp); \ if (rsp && err) \ rsp->hdr.rc = err; \ \ @@ -1313,29 +1405,38 @@ static int rvu_process_mbox_msg(struct rvu *rvu, int devid, } MBOX_MESSAGES #undef M - break; + bad_message: default: - otx2_reply_invalid_msg(&rvu->mbox, devid, req->pcifunc, - req->id); + 
otx2_reply_invalid_msg(mbox, devid, req->pcifunc, req->id); return -ENODEV; } } -static void rvu_mbox_handler(struct work_struct *work) +static void __rvu_mbox_handler(struct rvu_work *mwork, int type) { - struct rvu_work *mwork = container_of(work, struct rvu_work, work); struct rvu *rvu = mwork->rvu; + int offset, err, id, devid; struct otx2_mbox_dev *mdev; struct mbox_hdr *req_hdr; struct mbox_msghdr *msg; + struct mbox_wq_info *mw; struct otx2_mbox *mbox; - int offset, id, err; - u16 pf; - mbox = &rvu->mbox; - pf = mwork - rvu->mbox_wrk; - mdev = &mbox->dev[pf]; + switch (type) { + case TYPE_AFPF: + mw = &rvu->afpf_wq_info; + break; + case TYPE_AFVF: + mw = &rvu->afvf_wq_info; + break; + default: + return; + } + + devid = mwork - mw->mbox_wrk; + mbox = &mw->mbox; + mdev = &mbox->dev[devid]; /* Process received mbox messages */ req_hdr = mdev->mbase + mbox->rx_start; @@ -1347,10 +1448,21 @@ static void rvu_mbox_handler(struct work_struct *work) for (id = 0; id < req_hdr->num_msgs; id++) { msg = mdev->mbase + offset; - /* Set which PF sent this message based on mbox IRQ */ - msg->pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); - msg->pcifunc |= (pf << RVU_PFVF_PF_SHIFT); - err = rvu_process_mbox_msg(rvu, pf, msg); + /* Set which PF/VF sent this message based on mbox IRQ */ + switch (type) { + case TYPE_AFPF: + msg->pcifunc &= + ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT); + msg->pcifunc |= (devid << RVU_PFVF_PF_SHIFT); + break; + case TYPE_AFVF: + msg->pcifunc &= + ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT); + msg->pcifunc |= (devid << RVU_PFVF_FUNC_SHIFT) + 1; + break; + } + + err = rvu_process_mbox_msg(mbox, devid, msg); if (!err) { offset = mbox->rx_start + msg->next_msgoff; continue; @@ -1358,31 +1470,57 @@ static void rvu_mbox_handler(struct work_struct *work) if (msg->pcifunc & RVU_PFVF_FUNC_MASK) dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d:VF%d\n", - err, otx2_mbox_id2name(msg->id), msg->id, pf, + err, otx2_mbox_id2name(msg->id), + msg->id, devid, (msg->pcifunc & RVU_PFVF_FUNC_MASK) - 1); else dev_warn(rvu->dev, "Error %d when processing message %s (0x%x) from PF%d\n", - err, otx2_mbox_id2name(msg->id), msg->id, pf); + err, otx2_mbox_id2name(msg->id), + msg->id, devid); } - /* Send mbox responses to PF */ - otx2_mbox_msg_send(mbox, pf); + /* Send mbox responses to VF/PF */ + otx2_mbox_msg_send(mbox, devid); +} + +static inline void rvu_afpf_mbox_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_handler(mwork, TYPE_AFPF); } -static void rvu_mbox_up_handler(struct work_struct *work) +static inline void rvu_afvf_mbox_handler(struct work_struct *work) { struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_handler(mwork, TYPE_AFVF); +} + +static void __rvu_mbox_up_handler(struct rvu_work *mwork, int type) +{ struct rvu *rvu = mwork->rvu; struct otx2_mbox_dev *mdev; struct mbox_hdr *rsp_hdr; struct mbox_msghdr *msg; + struct mbox_wq_info *mw; struct otx2_mbox *mbox; - int offset, id; - u16 pf; + int offset, id, devid; + + switch (type) { + case TYPE_AFPF: + mw = &rvu->afpf_wq_info; + break; + case TYPE_AFVF: + mw = &rvu->afvf_wq_info; + break; + default: + return; + } - mbox = &rvu->mbox_up; - pf = mwork - rvu->mbox_wrk_up; - mdev = &mbox->dev[pf]; + devid = mwork - mw->mbox_wrk_up; + mbox = &mw->mbox_up; + mdev = &mbox->dev[devid]; rsp_hdr = mdev->mbase + mbox->rx_start; if (rsp_hdr->num_msgs == 0) { @@ -1423,128 +1561,182 @@ end: mdev->msgs_acked++; } 
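Both mailbox work handlers above recover which PF/VF a message came from by pointer arithmetic on the rvu_work array (devid = mwork - mw->mbox_wrk) and then stamp that identity into msg->pcifunc, trusting the IRQ source rather than the message contents. A minimal userspace sketch of the stamping step, assuming the usual RVU pcifunc layout (PF in bits 15:10, function in bits 9:0; treat the shift/mask values as illustrative):

#include <stdio.h>
#include <stdint.h>

/* Assumed pcifunc layout: PF in bits [15:10], function (VF id + 1) in [9:0] */
#define RVU_PFVF_PF_SHIFT   10
#define RVU_PFVF_PF_MASK    0x3f
#define RVU_PFVF_FUNC_SHIFT 0
#define RVU_PFVF_FUNC_MASK  0x3ff

enum { TYPE_AFVF, TYPE_AFPF };

static uint16_t stamp_pcifunc(uint16_t pcifunc, int devid, int type)
{
	if (type == TYPE_AFPF) {
		/* Trust the IRQ source, not the message: overwrite PF bits */
		pcifunc &= ~(RVU_PFVF_PF_MASK << RVU_PFVF_PF_SHIFT);
		pcifunc |= (uint16_t)(devid << RVU_PFVF_PF_SHIFT);
	} else {
		/* AF's own VFs: func field is VF index + 1 (0 means the PF) */
		pcifunc &= ~(RVU_PFVF_FUNC_MASK << RVU_PFVF_FUNC_SHIFT);
		pcifunc |= (uint16_t)((devid << RVU_PFVF_FUNC_SHIFT) + 1);
	}
	return pcifunc;
}

int main(void)
{
	printf("PF2 -> pcifunc 0x%04x\n", stamp_pcifunc(0, 2, TYPE_AFPF));
	printf("VF2 -> pcifunc 0x%04x\n", stamp_pcifunc(0, 2, TYPE_AFVF));
	return 0;
}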
- otx2_mbox_reset(mbox, 0); + otx2_mbox_reset(mbox, devid); } -static int rvu_mbox_init(struct rvu *rvu) +static inline void rvu_afpf_mbox_up_handler(struct work_struct *work) { - struct rvu_hwinfo *hw = rvu->hw; - void __iomem *hwbase = NULL; + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_up_handler(mwork, TYPE_AFPF); +} + +static inline void rvu_afvf_mbox_up_handler(struct work_struct *work) +{ + struct rvu_work *mwork = container_of(work, struct rvu_work, work); + + __rvu_mbox_up_handler(mwork, TYPE_AFVF); +} + +static int rvu_mbox_init(struct rvu *rvu, struct mbox_wq_info *mw, + int type, int num, + void (mbox_handler)(struct work_struct *), + void (mbox_up_handler)(struct work_struct *)) +{ + void __iomem *hwbase = NULL, *reg_base; + int err, i, dir, dir_up; struct rvu_work *mwork; + const char *name; u64 bar4_addr; - int err, pf; - rvu->mbox_wq = alloc_workqueue("rvu_afpf_mailbox", - WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, - hw->total_pfs); - if (!rvu->mbox_wq) + switch (type) { + case TYPE_AFPF: + name = "rvu_afpf_mailbox"; + bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); + dir = MBOX_DIR_AFPF; + dir_up = MBOX_DIR_AFPF_UP; + reg_base = rvu->afreg_base; + break; + case TYPE_AFVF: + name = "rvu_afvf_mailbox"; + bar4_addr = rvupf_read64(rvu, RVU_PF_VF_BAR4_ADDR); + dir = MBOX_DIR_PFVF; + dir_up = MBOX_DIR_PFVF_UP; + reg_base = rvu->pfreg_base; + break; + default: + return -EINVAL; + } + + mw->mbox_wq = alloc_workqueue(name, + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + num); + if (!mw->mbox_wq) return -ENOMEM; - rvu->mbox_wrk = devm_kcalloc(rvu->dev, hw->total_pfs, - sizeof(struct rvu_work), GFP_KERNEL); - if (!rvu->mbox_wrk) { + mw->mbox_wrk = devm_kcalloc(rvu->dev, num, + sizeof(struct rvu_work), GFP_KERNEL); + if (!mw->mbox_wrk) { err = -ENOMEM; goto exit; } - rvu->mbox_wrk_up = devm_kcalloc(rvu->dev, hw->total_pfs, - sizeof(struct rvu_work), GFP_KERNEL); - if (!rvu->mbox_wrk_up) { + mw->mbox_wrk_up = devm_kcalloc(rvu->dev, num, + sizeof(struct rvu_work), GFP_KERNEL); + if (!mw->mbox_wrk_up) { err = -ENOMEM; goto exit; } - /* Map mbox region shared with PFs */ - bar4_addr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PF_BAR4_ADDR); /* Mailbox is a reserved memory (in RAM) region shared between * RVU devices, shouldn't be mapped as device memory to allow * unaligned accesses. 
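Because that shared region is plain RAM, the reworked rvu_mbox_init() maps it once with ioremap_wc() for all num devices and hands each PF/VF a fixed MBOX_SIZE slice. A small sketch of the slot arithmetic, assuming a 64KB MBOX_SIZE and a made-up BAR4 address:

#include <stdio.h>
#include <stdint.h>

#define MBOX_SIZE 0x10000ULL	/* assumed 64KB per PF/VF mailbox slot */

int main(void)
{
	uint64_t bar4_addr = 0x840000000000ULL;	/* hypothetical BAR4 base */
	int num = 4;	/* mailboxes to map: hw->total_pfs or rvu->vfs */

	/* One ioremap_wc() of MBOX_SIZE * num covers every device; each
	 * device's region is then a fixed offset into that mapping. */
	for (int devid = 0; devid < num; devid++)
		printf("dev %d mbox @ 0x%llx\n", devid,
		       (unsigned long long)(bar4_addr + devid * MBOX_SIZE));
	return 0;
}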
*/ - hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * hw->total_pfs); + hwbase = ioremap_wc(bar4_addr, MBOX_SIZE * num); if (!hwbase) { dev_err(rvu->dev, "Unable to map mailbox region\n"); err = -ENOMEM; goto exit; } - err = otx2_mbox_init(&rvu->mbox, hwbase, rvu->pdev, rvu->afreg_base, - MBOX_DIR_AFPF, hw->total_pfs); + err = otx2_mbox_init(&mw->mbox, hwbase, rvu->pdev, reg_base, dir, num); if (err) goto exit; - err = otx2_mbox_init(&rvu->mbox_up, hwbase, rvu->pdev, rvu->afreg_base, - MBOX_DIR_AFPF_UP, hw->total_pfs); + err = otx2_mbox_init(&mw->mbox_up, hwbase, rvu->pdev, + reg_base, dir_up, num); if (err) goto exit; - for (pf = 0; pf < hw->total_pfs; pf++) { - mwork = &rvu->mbox_wrk[pf]; + for (i = 0; i < num; i++) { + mwork = &mw->mbox_wrk[i]; mwork->rvu = rvu; - INIT_WORK(&mwork->work, rvu_mbox_handler); - } + INIT_WORK(&mwork->work, mbox_handler); - for (pf = 0; pf < hw->total_pfs; pf++) { - mwork = &rvu->mbox_wrk_up[pf]; + mwork = &mw->mbox_wrk_up[i]; mwork->rvu = rvu; - INIT_WORK(&mwork->work, rvu_mbox_up_handler); + INIT_WORK(&mwork->work, mbox_up_handler); } return 0; exit: if (hwbase) iounmap((void __iomem *)hwbase); - destroy_workqueue(rvu->mbox_wq); + destroy_workqueue(mw->mbox_wq); return err; } -static void rvu_mbox_destroy(struct rvu *rvu) +static void rvu_mbox_destroy(struct mbox_wq_info *mw) { - if (rvu->mbox_wq) { - flush_workqueue(rvu->mbox_wq); - destroy_workqueue(rvu->mbox_wq); - rvu->mbox_wq = NULL; + if (mw->mbox_wq) { + flush_workqueue(mw->mbox_wq); + destroy_workqueue(mw->mbox_wq); + mw->mbox_wq = NULL; } - if (rvu->mbox.hwbase) - iounmap((void __iomem *)rvu->mbox.hwbase); + if (mw->mbox.hwbase) + iounmap((void __iomem *)mw->mbox.hwbase); - otx2_mbox_destroy(&rvu->mbox); - otx2_mbox_destroy(&rvu->mbox_up); + otx2_mbox_destroy(&mw->mbox); + otx2_mbox_destroy(&mw->mbox_up); } -static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +static void rvu_queue_work(struct mbox_wq_info *mw, int first, + int mdevs, u64 intr) { - struct rvu *rvu = (struct rvu *)rvu_irq; struct otx2_mbox_dev *mdev; struct otx2_mbox *mbox; struct mbox_hdr *hdr; + int i; + + for (i = first; i < mdevs; i++) { + /* start from 0 */ + if (!(intr & BIT_ULL(i - first))) + continue; + + mbox = &mw->mbox; + mdev = &mbox->dev[i]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) + queue_work(mw->mbox_wq, &mw->mbox_wrk[i].work); + + mbox = &mw->mbox_up; + mdev = &mbox->dev[i]; + hdr = mdev->mbase + mbox->rx_start; + if (hdr->num_msgs) + queue_work(mw->mbox_wq, &mw->mbox_wrk_up[i].work); + } +} + +static irqreturn_t rvu_mbox_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfs = rvu->vfs; u64 intr; - u8 pf; intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT); /* Clear interrupts */ rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT, intr); /* Sync with mbox memory region */ - smp_wmb(); + rmb(); - for (pf = 0; pf < rvu->hw->total_pfs; pf++) { - if (intr & (1ULL << pf)) { - mbox = &rvu->mbox; - mdev = &mbox->dev[pf]; - hdr = mdev->mbase + mbox->rx_start; - if (hdr->num_msgs) - queue_work(rvu->mbox_wq, - &rvu->mbox_wrk[pf].work); - mbox = &rvu->mbox_up; - mdev = &mbox->dev[pf]; - hdr = mdev->mbase + mbox->rx_start; - if (hdr->num_msgs) - queue_work(rvu->mbox_wq, - &rvu->mbox_wrk_up[pf].work); - } + rvu_queue_work(&rvu->afpf_wq_info, 0, rvu->hw->total_pfs, intr); + + /* Handle VF interrupts */ + if (vfs > 64) { + intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(1)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), intr); + + 
rvu_queue_work(&rvu->afvf_wq_info, 64, vfs, intr); + vfs -= 64; } + intr = rvupf_read64(rvu, RVU_PF_VFPF_MBOX_INTX(0)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), intr); + + rvu_queue_work(&rvu->afvf_wq_info, 0, vfs, intr); + return IRQ_HANDLED; } @@ -1561,6 +1753,216 @@ static void rvu_enable_mbox_intr(struct rvu *rvu) INTR_MASK(hw->total_pfs) & ~1ULL); } +static void rvu_blklf_teardown(struct rvu *rvu, u16 pcifunc, u8 blkaddr) +{ + struct rvu_block *block; + int slot, lf, num_lfs; + int err; + + block = &rvu->hw->block[blkaddr]; + num_lfs = rvu_get_rsrc_mapcount(rvu_get_pfvf(rvu, pcifunc), + block->type); + if (!num_lfs) + return; + for (slot = 0; slot < num_lfs; slot++) { + lf = rvu_get_lf(rvu, block, pcifunc, slot); + if (lf < 0) + continue; + + /* Cleanup LF and reset it */ + if (block->addr == BLKADDR_NIX0) + rvu_nix_lf_teardown(rvu, pcifunc, block->addr, lf); + else if (block->addr == BLKADDR_NPA) + rvu_npa_lf_teardown(rvu, pcifunc, lf); + + err = rvu_lf_reset(rvu, block, lf); + if (err) { + dev_err(rvu->dev, "Failed to reset blkaddr %d LF%d\n", + block->addr, lf); + } + } +} + +static void __rvu_flr_handler(struct rvu *rvu, u16 pcifunc) +{ + mutex_lock(&rvu->flr_lock); + /* Reset order should reflect inter-block dependencies: + * 1. Reset any packet/work sources (NIX, CPT, TIM) + * 2. Flush and reset SSO/SSOW + * 3. Cleanup pools (NPA) + */ + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NIX0); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_CPT0); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_TIM); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSOW); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_SSO); + rvu_blklf_teardown(rvu, pcifunc, BLKADDR_NPA); + rvu_detach_rsrcs(rvu, NULL, pcifunc); + mutex_unlock(&rvu->flr_lock); +} + +static void rvu_afvf_flr_handler(struct rvu *rvu, int vf) +{ + int reg = 0; + + /* pcifunc = 0(PF0) | (vf + 1) */ + __rvu_flr_handler(rvu, vf + 1); + + if (vf >= 64) { + reg = 1; + vf = vf - 64; + } + + /* Signal FLR finish and enable IRQ */ + rvupf_write64(rvu, RVU_PF_VFTRPENDX(reg), BIT_ULL(vf)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(reg), BIT_ULL(vf)); +} + +static void rvu_flr_handler(struct work_struct *work) +{ + struct rvu_work *flrwork = container_of(work, struct rvu_work, work); + struct rvu *rvu = flrwork->rvu; + u16 pcifunc, numvfs, vf; + u64 cfg; + int pf; + + pf = flrwork - rvu->flr_wrk; + if (pf >= rvu->hw->total_pfs) { + rvu_afvf_flr_handler(rvu, pf - rvu->hw->total_pfs); + return; + } + + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + numvfs = (cfg >> 12) & 0xFF; + pcifunc = pf << RVU_PFVF_PF_SHIFT; + + for (vf = 0; vf < numvfs; vf++) + __rvu_flr_handler(rvu, (pcifunc | (vf + 1))); + + __rvu_flr_handler(rvu, pcifunc); + + /* Signal FLR finish */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, BIT_ULL(pf)); + + /* Enable interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, BIT_ULL(pf)); +} + +static void rvu_afvf_queue_flr_work(struct rvu *rvu, int start_vf, int numvfs) +{ + int dev, vf, reg = 0; + u64 intr; + + if (start_vf >= 64) + reg = 1; + + intr = rvupf_read64(rvu, RVU_PF_VFFLR_INTX(reg)); + if (!intr) + return; + + for (vf = 0; vf < numvfs; vf++) { + if (!(intr & BIT_ULL(vf))) + continue; + dev = vf + start_vf + rvu->hw->total_pfs; + queue_work(rvu->flr_wq, &rvu->flr_wrk[dev].work); + /* Clear and disable the interrupt */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(reg), BIT_ULL(vf)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(reg), BIT_ULL(vf)); + } +} + +static irqreturn_t rvu_flr_intr_handler(int irq, void 
*rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT); + if (!intr) + goto afvf_flr; + + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + /* PF is already dead do only AF related operations */ + queue_work(rvu->flr_wq, &rvu->flr_wrk[pf].work); + /* clear interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT, + BIT_ULL(pf)); + /* Disable the interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, + BIT_ULL(pf)); + } + } + +afvf_flr: + rvu_afvf_queue_flr_work(rvu, 0, 64); + if (rvu->vfs > 64) + rvu_afvf_queue_flr_work(rvu, 64, rvu->vfs - 64); + + return IRQ_HANDLED; +} + +static void rvu_me_handle_vfset(struct rvu *rvu, int idx, u64 intr) +{ + int vf; + + /* Nothing to be done here other than clearing the + * TRPEND bit. + */ + for (vf = 0; vf < 64; vf++) { + if (intr & (1ULL << vf)) { + /* clear the trpend due to ME(master enable) */ + rvupf_write64(rvu, RVU_PF_VFTRPENDX(idx), BIT_ULL(vf)); + /* clear interrupt */ + rvupf_write64(rvu, RVU_PF_VFME_INTX(idx), BIT_ULL(vf)); + } + } +} + +/* Handles ME interrupts from VFs of AF */ +static irqreturn_t rvu_me_vf_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + int vfset; + u64 intr; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); + + for (vfset = 0; vfset <= 1; vfset++) { + intr = rvupf_read64(rvu, RVU_PF_VFME_INTX(vfset)); + if (intr) + rvu_me_handle_vfset(rvu, vfset, intr); + } + + return IRQ_HANDLED; +} + +/* Handles ME interrupts from PFs */ +static irqreturn_t rvu_me_pf_intr_handler(int irq, void *rvu_irq) +{ + struct rvu *rvu = (struct rvu *)rvu_irq; + u64 intr; + u8 pf; + + intr = rvu_read64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT); + + /* Nothing to be done here other than clearing the + * TRPEND bit. + */ + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + if (intr & (1ULL << pf)) { + /* clear the trpend due to ME(master enable) */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFTRPEND, + BIT_ULL(pf)); + /* clear interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT, + BIT_ULL(pf)); + } + } + + return IRQ_HANDLED; +} + static void rvu_unregister_interrupts(struct rvu *rvu) { int irq; @@ -1569,6 +1971,14 @@ static void rvu_unregister_interrupts(struct rvu *rvu) rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFAF_MBOX_INT_ENA_W1C, INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + /* Disable the PF FLR interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + /* Disable the PF ME interrupt */ + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1C, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + for (irq = 0; irq < rvu->num_vec; irq++) { if (rvu->irq_allocated[irq]) free_irq(pci_irq_vector(rvu->pdev, irq), rvu); @@ -1578,9 +1988,25 @@ static void rvu_unregister_interrupts(struct rvu *rvu) rvu->num_vec = 0; } +static int rvu_afvf_msix_vectors_num_ok(struct rvu *rvu) +{ + struct rvu_pfvf *pfvf = &rvu->pf[0]; + int offset; + + pfvf = &rvu->pf[0]; + offset = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + + /* Make sure there are enough MSIX vectors configured so that + * VF interrupts can be handled. Offset equal to zero means + * that PF vectors are not configured and overlapping AF vectors. 
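In other words, AF VF handling is only attempted when PF0 advertises enough MSI-X vectors and a nonzero PF vector offset. A standalone sketch of that predicate, with illustrative counts standing in for the RVU_AF_INT_VEC_* / RVU_PF_INT_VEC_* enum sizes:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative values; the driver takes these from its interrupt
 * vector enums. */
#define AF_INT_VEC_CNT 5
#define PF_INT_VEC_CNT 7

static bool afvf_vectors_ok(int msix_max, int pf0_int_cfg_offset)
{
	/* Need room for both AF and PF vector groups, and a nonzero
	 * offset so PF vectors don't overlap the AF ones. */
	return msix_max >= AF_INT_VEC_CNT + PF_INT_VEC_CNT &&
	       pf0_int_cfg_offset != 0;
}

int main(void)
{
	printf("%d\n", afvf_vectors_ok(16, 4));	/* 1: enough vectors */
	printf("%d\n", afvf_vectors_ok(16, 0));	/* 0: overlapping AF vectors */
	return 0;
}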
+ */ + return (pfvf->msix.max >= RVU_AF_INT_VEC_CNT + RVU_PF_INT_VEC_CNT) && + offset; +} + static int rvu_register_interrupts(struct rvu *rvu) { - int ret; + int ret, offset, pf_vec_start; rvu->num_vec = pci_msix_vec_count(rvu->pdev); @@ -1620,13 +2046,331 @@ static int rvu_register_interrupts(struct rvu *rvu) /* Enable mailbox interrupts from all PFs */ rvu_enable_mbox_intr(rvu); + /* Register FLR interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], + "RVUAF FLR"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFFLR), + rvu_flr_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_PFFLR * NAME_SIZE], + rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for FLR\n"); + goto fail; + } + rvu->irq_allocated[RVU_AF_INT_VEC_PFFLR] = true; + + /* Enable FLR interrupt for all PFs*/ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFFLR_INT, INTR_MASK(rvu->hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFFLR_INT_ENA_W1S, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + /* Register ME interrupt handler */ + sprintf(&rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], + "RVUAF ME"); + ret = request_irq(pci_irq_vector(rvu->pdev, RVU_AF_INT_VEC_PFME), + rvu_me_pf_intr_handler, 0, + &rvu->irq_name[RVU_AF_INT_VEC_PFME * NAME_SIZE], + rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for ME\n"); + } + rvu->irq_allocated[RVU_AF_INT_VEC_PFME] = true; + + /* Enable ME interrupt for all PFs*/ + rvu_write64(rvu, BLKADDR_RVUM, + RVU_AF_PFME_INT, INTR_MASK(rvu->hw->total_pfs)); + + rvu_write64(rvu, BLKADDR_RVUM, RVU_AF_PFME_INT_ENA_W1S, + INTR_MASK(rvu->hw->total_pfs) & ~1ULL); + + if (!rvu_afvf_msix_vectors_num_ok(rvu)) + return 0; + + /* Get PF MSIX vectors offset. */ + pf_vec_start = rvu_read64(rvu, BLKADDR_RVUM, + RVU_PRIV_PFX_INT_CFG(0)) & 0x3ff; + + /* Register MBOX0 interrupt. */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox0\n"); + + rvu->irq_allocated[offset] = true; + + /* Register MBOX1 interrupt. MBOX1 IRQ number follows MBOX0 so + * simply increment current offset by 1. 
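The IRQ name table is a flat char array indexed by vector number times NAME_SIZE, so registering MBOX1 really is MBOX0's bookkeeping with the next vector. A hedged sketch of that indexing (NAME_SIZE, the vector numbers, and pf_vec_start below are assumptions for illustration):

#include <stdio.h>

#define NAME_SIZE 32	/* assumed per-IRQ name slot width */

/* Hypothetical vector numbering within the PF range; the driver gets
 * these from its RVU_PF_INT_VEC_* enum. */
enum { VEC_VFPF_MBOX0 = 4, VEC_VFPF_MBOX1 = 5 };

int main(void)
{
	char irq_name[64 * NAME_SIZE];
	int pf_vec_start = 8;	/* hypothetical RVU_PRIV_PFX_INT_CFG(0) & 0x3ff */
	int off0 = pf_vec_start + VEC_VFPF_MBOX0;
	int off1 = pf_vec_start + VEC_VFPF_MBOX1;	/* MBOX0's vector + 1 */

	snprintf(&irq_name[off0 * NAME_SIZE], NAME_SIZE, "RVUAFVF Mbox0");
	snprintf(&irq_name[off1 * NAME_SIZE], NAME_SIZE, "RVUAFVF Mbox1");
	printf("%s / %s\n", &irq_name[off0 * NAME_SIZE],
	       &irq_name[off1 * NAME_SIZE]);
	return 0;
}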
+ */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFPF_MBOX1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF Mbox1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_mbox_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], + rvu); + if (ret) + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for Mbox1\n"); + + rvu->irq_allocated[offset] = true; + + /* Register FLR interrupt handler for AF's VFs */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_flr_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF FLR0\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + + offset = pf_vec_start + RVU_PF_INT_VEC_VFFLR1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF FLR1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_flr_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF FLR1\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + + /* Register ME interrupt handler for AF's VFs */ + offset = pf_vec_start + RVU_PF_INT_VEC_VFME0; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME0"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_me_vf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF ME0\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; + + offset = pf_vec_start + RVU_PF_INT_VEC_VFME1; + sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAFVF ME1"); + ret = request_irq(pci_irq_vector(rvu->pdev, offset), + rvu_me_vf_intr_handler, 0, + &rvu->irq_name[offset * NAME_SIZE], rvu); + if (ret) { + dev_err(rvu->dev, + "RVUAF: IRQ registration failed for RVUAFVF ME1\n"); + goto fail; + } + rvu->irq_allocated[offset] = true; return 0; fail: - pci_free_irq_vectors(rvu->pdev); + rvu_unregister_interrupts(rvu); + return ret; +} + +static void rvu_flr_wq_destroy(struct rvu *rvu) +{ + if (rvu->flr_wq) { + flush_workqueue(rvu->flr_wq); + destroy_workqueue(rvu->flr_wq); + rvu->flr_wq = NULL; + } +} + +static int rvu_flr_init(struct rvu *rvu) +{ + int dev, num_devs; + u64 cfg; + int pf; + + /* Enable FLR for all PFs*/ + for (pf = 0; pf < rvu->hw->total_pfs; pf++) { + cfg = rvu_read64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf)); + rvu_write64(rvu, BLKADDR_RVUM, RVU_PRIV_PFX_CFG(pf), + cfg | BIT_ULL(22)); + } + + rvu->flr_wq = alloc_workqueue("rvu_afpf_flr", + WQ_UNBOUND | WQ_HIGHPRI | WQ_MEM_RECLAIM, + 1); + if (!rvu->flr_wq) + return -ENOMEM; + + num_devs = rvu->hw->total_pfs + pci_sriov_get_totalvfs(rvu->pdev); + rvu->flr_wrk = devm_kcalloc(rvu->dev, num_devs, + sizeof(struct rvu_work), GFP_KERNEL); + if (!rvu->flr_wrk) { + destroy_workqueue(rvu->flr_wq); + return -ENOMEM; + } + + for (dev = 0; dev < num_devs; dev++) { + rvu->flr_wrk[dev].rvu = rvu; + INIT_WORK(&rvu->flr_wrk[dev].work, rvu_flr_handler); + } + + mutex_init(&rvu->flr_lock); + + return 0; +} + +static void rvu_disable_afvf_intr(struct rvu *rvu) +{ + int vfs = rvu->vfs; + + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs)); + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1CX(1), + INTR_MASK(vfs - 64)); + 
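These enable/disable helpers split the VF space across two 64-bit registers: the (0) variants cover VFs 0-63 and the (1) variants cover VFs 64-127. A runnable sketch of the mask math, assuming INTR_MASK() is a saturating low-bits mask:

#include <stdio.h>
#include <stdint.h>

/* Assumed semantics of the driver's INTR_MASK(): one bit per PF/VF,
 * saturating to all-ones at 64 and above. */
static uint64_t intr_mask(int n)
{
	return n < 64 ? (1ULL << n) - 1 : ~0ULL;
}

int main(void)
{
	int vfs = 96;

	/* VFs 0..63 live in the (0) registers, VFs 64..127 in the (1) ones */
	printf("reg0 mask 0x%016llx\n", (unsigned long long)intr_mask(vfs));
	printf("reg1 mask 0x%016llx\n",
	       vfs > 64 ? (unsigned long long)intr_mask(vfs - 64) : 0ULL);
	return 0;
}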
rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64)); +} + +static void rvu_enable_afvf_intr(struct rvu *rvu) +{ + int vfs = rvu->vfs; + + /* Clear any pending interrupts and enable AF VF interrupts for + * the first 64 VFs. + */ + /* Mbox */ + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* FLR */ + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(0), INTR_MASK(vfs)); + + /* Same for remaining VFs, if any. */ + if (vfs <= 64) + return; + + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFPF_MBOX_INT_ENA_W1SX(1), + INTR_MASK(vfs - 64)); + + rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); + rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64)); +} + +#define PCI_DEVID_OCTEONTX2_LBK 0xA061 + +static int lbk_get_num_chans(void) +{ + struct pci_dev *pdev; + void __iomem *base; + int ret = -EIO; + + pdev = pci_get_device(PCI_VENDOR_ID_CAVIUM, PCI_DEVID_OCTEONTX2_LBK, + NULL); + if (!pdev) + goto err; + + base = pci_ioremap_bar(pdev, 0); + if (!base) + goto err_put; + + /* Read number of available LBK channels from LBK(0)_CONST register. */ + ret = (readq(base + 0x10) >> 32) & 0xffff; + iounmap(base); +err_put: + pci_dev_put(pdev); +err: return ret; } +static int rvu_enable_sriov(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + int err, chans, vfs; + + if (!rvu_afvf_msix_vectors_num_ok(rvu)) { + dev_warn(&pdev->dev, + "Skipping SRIOV enablement since not enough IRQs are available\n"); + return 0; + } + + chans = lbk_get_num_chans(); + if (chans < 0) + return chans; + + vfs = pci_sriov_get_totalvfs(pdev); + + /* Limit VFs in case we have more VFs than LBK channels available. */ + if (vfs > chans) + vfs = chans; + + /* AF's VFs work in pairs and talk over consecutive loopback channels. + * Thus we want to enable maximum even number of VFs. In case + * odd number of VFs are available then the last VF on the list + * remains disabled. + */ + if (vfs & 0x1) { + dev_warn(&pdev->dev, + "Number of VFs should be even. Enabling %d out of %d.\n", + vfs - 1, vfs); + vfs--; + } + + if (!vfs) + return 0; + + /* Save VFs number for reference in VF interrupts handlers. + * Since interrupts might start arriving during SRIOV enablement + * ordinary API cannot be used to get number of enabled VFs. + */ + rvu->vfs = vfs; + + err = rvu_mbox_init(rvu, &rvu->afvf_wq_info, TYPE_AFVF, vfs, + rvu_afvf_mbox_handler, rvu_afvf_mbox_up_handler); + if (err) + return err; + + rvu_enable_afvf_intr(rvu); + /* Make sure IRQs are enabled before SRIOV. */ + mb(); + + err = pci_enable_sriov(pdev, vfs); + if (err) { + rvu_disable_afvf_intr(rvu); + rvu_mbox_destroy(&rvu->afvf_wq_info); + return err; + } + + return 0; +} + +static void rvu_disable_sriov(struct rvu *rvu) +{ + rvu_disable_afvf_intr(rvu); + rvu_mbox_destroy(&rvu->afvf_wq_info); + pci_disable_sriov(rvu->pdev); +} + +static void rvu_update_module_params(struct rvu *rvu) +{ + const char *default_pfl_name = "default"; + + strscpy(rvu->mkex_pfl_name, + mkex_profile ? 
mkex_profile : default_pfl_name, MKEX_NAME_LEN); +} + static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct device *dev = &pdev->dev; @@ -1680,6 +2424,9 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) goto err_release_regions; } + /* Store module params in rvu structure */ + rvu_update_module_params(rvu); + /* Check which blocks the HW supports */ rvu_check_block_implemented(rvu); @@ -1689,24 +2436,35 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (err) goto err_release_regions; - err = rvu_mbox_init(rvu); + /* Init mailbox btw AF and PFs */ + err = rvu_mbox_init(rvu, &rvu->afpf_wq_info, TYPE_AFPF, + rvu->hw->total_pfs, rvu_afpf_mbox_handler, + rvu_afpf_mbox_up_handler); if (err) goto err_hwsetup; - err = rvu_cgx_probe(rvu); + err = rvu_flr_init(rvu); if (err) goto err_mbox; err = rvu_register_interrupts(rvu); if (err) - goto err_cgx; + goto err_flr; + + /* Enable AF's VFs (if any) */ + err = rvu_enable_sriov(rvu); + if (err) + goto err_irq; return 0; -err_cgx: - rvu_cgx_wq_destroy(rvu); +err_irq: + rvu_unregister_interrupts(rvu); +err_flr: + rvu_flr_wq_destroy(rvu); err_mbox: - rvu_mbox_destroy(rvu); + rvu_mbox_destroy(&rvu->afpf_wq_info); err_hwsetup: + rvu_cgx_exit(rvu); rvu_reset_all_blocks(rvu); rvu_free_hw_resources(rvu); err_release_regions: @@ -1725,8 +2483,10 @@ static void rvu_remove(struct pci_dev *pdev) struct rvu *rvu = pci_get_drvdata(pdev); rvu_unregister_interrupts(rvu); - rvu_cgx_wq_destroy(rvu); - rvu_mbox_destroy(rvu); + rvu_flr_wq_destroy(rvu); + rvu_cgx_exit(rvu); + rvu_mbox_destroy(&rvu->afpf_wq_info); + rvu_disable_sriov(rvu); rvu_reset_all_blocks(rvu); rvu_free_hw_resources(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 2c0580cd2807..c9d60b0554c0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -11,6 +11,7 @@ #ifndef RVU_H #define RVU_H +#include <linux/pci.h> #include "rvu_struct.h" #include "common.h" #include "mbox.h" @@ -18,6 +19,9 @@ /* PCI device IDs */ #define PCI_DEVID_OCTEONTX2_RVU_AF 0xA065 +/* Subsystem Device ID */ +#define PCI_SUBSYS_DEVID_96XX 0xB200 + /* PCI BAR nos */ #define PCI_AF_REG_BAR_NUM 0 #define PCI_PF_REG_BAR_NUM 2 @@ -64,7 +68,7 @@ struct nix_mcast { struct qmem *mcast_buf; int replay_pkind; int next_free_mce; - spinlock_t mce_lock; /* Serialize MCE updates */ + struct mutex mce_lock; /* Serialize MCE updates */ }; struct nix_mce_list { @@ -74,15 +78,27 @@ struct nix_mce_list { }; struct npc_mcam { - spinlock_t lock; /* MCAM entries and counters update lock */ + struct rsrc_bmap counters; + struct mutex lock; /* MCAM entries and counters update lock */ + unsigned long *bmap; /* bitmap, 0 => bmap_entries */ + unsigned long *bmap_reverse; /* Reverse bitmap, bmap_entries => 0 */ + u16 bmap_entries; /* Number of unreserved MCAM entries */ + u16 bmap_fcnt; /* MCAM entries free count */ + u16 *entry2pfvf_map; + u16 *entry2cntr_map; + u16 *cntr2pfvf_map; + u16 *cntr_refcnt; u8 keysize; /* MCAM keysize 112/224/448 bits */ u8 banks; /* Number of MCAM banks */ u8 banks_per_entry;/* Number of keywords in key */ u16 banksize; /* Number of MCAM entries in each bank */ u16 total_entries; /* Total number of MCAM entries */ - u16 entries; /* Total minus reserved for NIX LFs */ u16 nixlf_offset; /* Offset of nixlf rsvd uncast entries */ u16 pf_offset; /* Offset of PF's rsvd bcast, promisc entries */ + u16 lprio_count; + 
u16 lprio_start; + u16 hprio_count; + u16 hprio_end; }; /* Structure for per RVU func info ie PF/VF */ @@ -122,18 +138,35 @@ struct rvu_pfvf { u16 tx_chan_base; u8 rx_chan_cnt; /* total number of RX channels */ u8 tx_chan_cnt; /* total number of TX channels */ + u16 maxlen; + u16 minlen; u8 mac_addr[ETH_ALEN]; /* MAC address of this PF/VF */ /* Broadcast pkt replication info */ u16 bcast_mce_idx; struct nix_mce_list bcast_mce_list; + + /* VLAN offload */ + struct mcam_entry entry; + int rxvlan_index; + bool rxvlan; }; struct nix_txsch { struct rsrc_bmap schq; u8 lvl; - u16 *pfvf_map; +#define NIX_TXSCHQ_TL1_CFG_DONE BIT_ULL(0) +#define TXSCH_MAP_FUNC(__pfvf_map) ((__pfvf_map) & 0xFFFF) +#define TXSCH_MAP_FLAGS(__pfvf_map) ((__pfvf_map) >> 16) +#define TXSCH_MAP(__func, __flags) (((__func) & 0xFFFF) | ((__flags) << 16)) + u32 *pfvf_map; +}; + +struct nix_mark_format { + u8 total; + u8 in_use; + u32 *cfg; }; struct npc_pkind { @@ -141,9 +174,23 @@ struct npc_pkind { u32 *pfchan_map; }; +struct nix_flowkey { +#define NIX_FLOW_KEY_ALG_MAX 32 + u32 flowkey[NIX_FLOW_KEY_ALG_MAX]; + int in_use; +}; + +struct nix_lso { + u8 total; + u8 in_use; +}; + struct nix_hw { struct nix_txsch txsch[NIX_TXSCH_LVL_CNT]; /* Tx schedulers */ struct nix_mcast mcast; + struct nix_flowkey flowkey; + struct nix_mark_format mark_format; + struct nix_lso lso; }; struct rvu_hwinfo { @@ -164,6 +211,16 @@ struct rvu_hwinfo { struct npc_mcam mcam; }; +struct mbox_wq_info { + struct otx2_mbox mbox; + struct rvu_work *mbox_wrk; + + struct otx2_mbox mbox_up; + struct rvu_work *mbox_wrk_up; + + struct workqueue_struct *mbox_wq; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -172,14 +229,17 @@ struct rvu { struct rvu_hwinfo *hw; struct rvu_pfvf *pf; struct rvu_pfvf *hwvf; - spinlock_t rsrc_lock; /* Serialize resource alloc/free */ + struct mutex rsrc_lock; /* Serialize resource alloc/free */ + int vfs; /* Number of VFs attached to RVU */ /* Mbox */ - struct otx2_mbox mbox; - struct rvu_work *mbox_wrk; - struct otx2_mbox mbox_up; - struct rvu_work *mbox_wrk_up; - struct workqueue_struct *mbox_wq; + struct mbox_wq_info afpf_wq_info; + struct mbox_wq_info afvf_wq_info; + + /* PF FLR */ + struct rvu_work *flr_wrk; + struct workqueue_struct *flr_wq; + struct mutex flr_lock; /* Serialize FLRs */ /* MSI-X */ u16 num_vec; @@ -190,7 +250,7 @@ struct rvu { /* CGX */ #define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ u8 cgx_mapped_pfs; - u8 cgx_cnt; /* available cgx ports */ + u8 cgx_cnt_max; /* CGX port count max */ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ u16 *cgxlmac2pf_map; /* bitmap of mapped pfs for * every cgx lmac port @@ -201,6 +261,8 @@ struct rvu { struct workqueue_struct *cgx_evh_wq; spinlock_t cgx_evq_lock; /* cgx event queue lock */ struct list_head cgx_evq_head; /* cgx event queue head */ + + char mkex_pfl_name[MKEX_NAME_LEN]; /* Configured MKEX profile name */ }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -223,9 +285,22 @@ static inline u64 rvupf_read64(struct rvu *rvu, u64 offset) return readq(rvu->pfreg_base + offset); } +static inline bool is_rvu_9xxx_A0(struct rvu *rvu) +{ + struct pci_dev *pdev = rvu->pdev; + + return (pdev->revision == 0x00) && + (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX); +} + /* Function Prototypes * RVU */ +static inline int is_afvf(u16 pcifunc) +{ + return !(pcifunc & ~RVU_PFVF_FUNC_MASK); +} + int rvu_alloc_bitmap(struct rsrc_bmap *rsrc); int rvu_alloc_rsrc(struct rsrc_bmap *rsrc); void rvu_free_rsrc(struct 
rsrc_bmap *rsrc, int id); @@ -236,6 +311,7 @@ int rvu_get_pf(u16 pcifunc); struct rvu_pfvf *rvu_get_pfvf(struct rvu *rvu, int pcifunc); void rvu_get_pf_numvfs(struct rvu *rvu, int pf, int *numvfs, int *hwvf); bool is_block_implemented(struct rvu_hwinfo *hw, int blkaddr); +bool is_pffunc_map_valid(struct rvu *rvu, u16 pcifunc, int blktype); int rvu_get_lf(struct rvu *rvu, struct rvu_block *block, u16 pcifunc, u16 slot); int rvu_lf_reset(struct rvu *rvu, struct rvu_block *block, int lf); int rvu_get_blkaddr(struct rvu *rvu, int blktype, u16 pcifunc); @@ -266,89 +342,110 @@ static inline void rvu_get_cgx_lmac_id(u8 map, u8 *cgx_id, u8 *lmac_id) *lmac_id = (map & 0xF); } -int rvu_cgx_probe(struct rvu *rvu); -void rvu_cgx_wq_destroy(struct rvu *rvu); +int rvu_cgx_init(struct rvu *rvu); +int rvu_cgx_exit(struct rvu *rvu); void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu); int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start); -int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, struct cgx_stats_rsp *rsp); -int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, +int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp); -int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, +int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp); -int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, struct cgx_link_info_msg *rsp); -int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); /* NPA APIs */ int rvu_npa_init(struct rvu *rvu); void rvu_npa_freemem(struct rvu *rvu); -int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu, +void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf); +int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp); -int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu, +int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req 
*req, struct msg_rsp *rsp); -int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, struct npa_lf_alloc_req *req, struct npa_lf_alloc_rsp *rsp); -int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); /* NIX APIs */ +bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc); int rvu_nix_init(struct rvu *rvu); +int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, u32 cfg); void rvu_nix_freemem(struct rvu *rvu); int rvu_get_nixlf_count(struct rvu *rvu); -int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, +void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int npalf); +int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, struct nix_lf_alloc_req *req, struct nix_lf_alloc_rsp *rsp); -int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu, +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp); -int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu, +int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp); -int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu, +int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, struct nix_txsch_free_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, struct nix_rss_flowkey_cfg *req, - struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, + struct nix_rss_flowkey_cfg_rsp *rsp); +int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp); -int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, +int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp); +int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, + struct nix_mark_format_cfg *req, + struct nix_mark_format_cfg_rsp *rsp); +int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, + struct nix_lso_format_cfg *req, + struct nix_lso_format_cfg_rsp *rsp); /* NPC APIs */ int 
rvu_npc_init(struct rvu *rvu); @@ -360,9 +457,48 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, bool allmulti); void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan); +int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); +void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf); void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, int group, int alg_idx, int mcam_index); +int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, + struct npc_mcam_alloc_entry_req *req, + struct npc_mcam_alloc_entry_rsp *rsp); +int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, + struct npc_mcam_free_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, + struct npc_mcam_write_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu *rvu, + struct npc_mcam_shift_entry_req *req, + struct npc_mcam_shift_entry_rsp *rsp); +int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp); +int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, + struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp); +int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, + struct npc_mcam_oper_counter_rsp *rsp); +int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, + struct npc_mcam_alloc_and_write_entry_req *req, + struct npc_mcam_alloc_and_write_entry_rsp *rsp); +int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, + struct npc_get_kex_cfg_rsp *rsp); #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 188185c15b4a..7d7133c5f799 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -20,14 +20,14 @@ struct cgx_evq_entry { struct cgx_link_event link_event; }; -#define M(_name, _id, _req_type, _rsp_type) \ +#define M(_name, _id, _fn_name, _req_type, _rsp_type) \ static struct _req_type __maybe_unused \ -*otx2_mbox_alloc_msg_ ## _name(struct rvu *rvu, int devid) \ +*otx2_mbox_alloc_msg_ ## _fn_name(struct rvu *rvu, int devid) \ { \ struct _req_type *req; \ \ req = (struct _req_type *)otx2_mbox_alloc_msg_rsp( \ - &rvu->mbox_up, devid, sizeof(struct _req_type), \ + &rvu->afpf_wq_info.mbox_up, devid, sizeof(struct _req_type), \ sizeof(struct _rsp_type)); \ if (!req) \ return NULL; \ @@ -52,7 +52,7 @@ static inline u8 
cgxlmac_id_to_bmap(u8 cgx_id, u8 lmac_id) void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) { - if (cgx_id >= rvu->cgx_cnt) + if (cgx_id >= rvu->cgx_cnt_max) return NULL; return rvu->cgx_idmap[cgx_id]; @@ -61,38 +61,40 @@ void *rvu_cgx_pdata(u8 cgx_id, struct rvu *rvu) static int rvu_map_cgx_lmac_pf(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; - int cgx_cnt = rvu->cgx_cnt; + int cgx_cnt_max = rvu->cgx_cnt_max; int cgx, lmac_cnt, lmac; int pf = PF_CGXMAP_BASE; int size, free_pkind; - if (!cgx_cnt) + if (!cgx_cnt_max) return 0; - if (cgx_cnt > 0xF || MAX_LMAC_PER_CGX > 0xF) + if (cgx_cnt_max > 0xF || MAX_LMAC_PER_CGX > 0xF) return -EINVAL; /* Alloc map table * An additional entry is required since PF id starts from 1 and * hence entry at offset 0 is invalid. */ - size = (cgx_cnt * MAX_LMAC_PER_CGX + 1) * sizeof(u8); - rvu->pf2cgxlmac_map = devm_kzalloc(rvu->dev, size, GFP_KERNEL); + size = (cgx_cnt_max * MAX_LMAC_PER_CGX + 1) * sizeof(u8); + rvu->pf2cgxlmac_map = devm_kmalloc(rvu->dev, size, GFP_KERNEL); if (!rvu->pf2cgxlmac_map) return -ENOMEM; - /* Initialize offset 0 with an invalid cgx and lmac id */ - rvu->pf2cgxlmac_map[0] = 0xFF; + /* Initialize all entries with an invalid cgx and lmac id */ + memset(rvu->pf2cgxlmac_map, 0xFF, size); /* Reverse map table */ rvu->cgxlmac2pf_map = devm_kzalloc(rvu->dev, - cgx_cnt * MAX_LMAC_PER_CGX * sizeof(u16), + cgx_cnt_max * MAX_LMAC_PER_CGX * sizeof(u16), GFP_KERNEL); if (!rvu->cgxlmac2pf_map) return -ENOMEM; rvu->cgx_mapped_pfs = 0; - for (cgx = 0; cgx < cgx_cnt; cgx++) { + for (cgx = 0; cgx < cgx_cnt_max; cgx++) { + if (!rvu_cgx_pdata(cgx, rvu)) + continue; lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); for (lmac = 0; lmac < lmac_cnt; lmac++, pf++) { rvu->pf2cgxlmac_map[pf] = cgxlmac_id_to_bmap(cgx, lmac); @@ -177,12 +179,12 @@ static void cgx_notify_pfs(struct cgx_link_event *event, struct rvu *rvu) } /* Send mbox message to PF */ - msg = otx2_mbox_alloc_msg_CGX_LINK_EVENT(rvu, pfid); + msg = otx2_mbox_alloc_msg_cgx_link_event(rvu, pfid); if (!msg) continue; msg->link_info = *linfo; - otx2_mbox_msg_send(&rvu->mbox_up, pfid); - err = otx2_mbox_wait_for_rsp(&rvu->mbox_up, pfid); + otx2_mbox_msg_send(&rvu->afpf_wq_info.mbox_up, pfid); + err = otx2_mbox_wait_for_rsp(&rvu->afpf_wq_info.mbox_up, pfid); if (err) dev_warn(rvu->dev, "notification to pf %d failed\n", pfid); @@ -216,7 +218,7 @@ static void cgx_evhandler_task(struct work_struct *work) } while (1); } -static void cgx_lmac_event_handler_init(struct rvu *rvu) +static int cgx_lmac_event_handler_init(struct rvu *rvu) { struct cgx_event_cb cb; int cgx, lmac, err; @@ -228,14 +230,16 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu) rvu->cgx_evh_wq = alloc_workqueue("rvu_evh_wq", 0, 0); if (!rvu->cgx_evh_wq) { dev_err(rvu->dev, "alloc workqueue failed"); - return; + return -ENOMEM; } cb.notify_link_chg = cgx_lmac_postevent; /* link change call back */ cb.data = rvu; - for (cgx = 0; cgx < rvu->cgx_cnt; cgx++) { + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) { err = cgx_lmac_evh_register(&cb, cgxd, lmac); if (err) @@ -244,9 +248,11 @@ static void cgx_lmac_event_handler_init(struct rvu *rvu) cgx, lmac); } } + + return 0; } -void rvu_cgx_wq_destroy(struct rvu *rvu) +static void rvu_cgx_wq_destroy(struct rvu *rvu) { if (rvu->cgx_evh_wq) { flush_workqueue(rvu->cgx_evh_wq); @@ -255,25 +261,28 @@ void rvu_cgx_wq_destroy(struct rvu *rvu) } } -int 
rvu_cgx_probe(struct rvu *rvu) +int rvu_cgx_init(struct rvu *rvu) { - int i, err; + int cgx, err; + void *cgxd; - /* find available cgx ports */ - rvu->cgx_cnt = cgx_get_cgx_cnt(); - if (!rvu->cgx_cnt) { + /* CGX port id starts from 0 and are not necessarily contiguous + * Hence we allocate resources based on the maximum port id value. + */ + rvu->cgx_cnt_max = cgx_get_cgxcnt_max(); + if (!rvu->cgx_cnt_max) { dev_info(rvu->dev, "No CGX devices found!\n"); return -ENODEV; } - rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt * sizeof(void *), - GFP_KERNEL); + rvu->cgx_idmap = devm_kzalloc(rvu->dev, rvu->cgx_cnt_max * + sizeof(void *), GFP_KERNEL); if (!rvu->cgx_idmap) return -ENOMEM; /* Initialize the cgxdata table */ - for (i = 0; i < rvu->cgx_cnt; i++) - rvu->cgx_idmap[i] = cgx_get_pdata(i); + for (cgx = 0; cgx < rvu->cgx_cnt_max; cgx++) + rvu->cgx_idmap[cgx] = cgx_get_pdata(cgx); /* Map CGX LMAC interfaces to RVU PFs */ err = rvu_map_cgx_lmac_pf(rvu); @@ -281,7 +290,47 @@ int rvu_cgx_probe(struct rvu *rvu) return err; /* Register for CGX events */ - cgx_lmac_event_handler_init(rvu); + err = cgx_lmac_event_handler_init(rvu); + if (err) + return err; + + /* Ensure event handler registration is completed, before + * we turn on the links + */ + mb(); + + /* Do link up for all CGX ports */ + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + err = cgx_lmac_linkup_start(cgxd); + if (err) + dev_err(rvu->dev, + "Link up process failed to start on cgx %d\n", + cgx); + } + + return 0; +} + +int rvu_cgx_exit(struct rvu *rvu) +{ + int cgx, lmac; + void *cgxd; + + for (cgx = 0; cgx <= rvu->cgx_cnt_max; cgx++) { + cgxd = rvu_cgx_pdata(cgx, rvu); + if (!cgxd) + continue; + for (lmac = 0; lmac < cgx_get_lmac_cnt(cgxd); lmac++) + cgx_lmac_evh_unregister(cgxd, lmac); + } + + /* Ensure event handler unregister is completed */ + mb(); + + rvu_cgx_wq_destroy(rvu); return 0; } @@ -303,21 +352,21 @@ int rvu_cgx_config_rxtx(struct rvu *rvu, u16 pcifunc, bool start) return 0; } -int rvu_mbox_handler_CGX_START_RXTX(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_start_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, true); return 0; } -int rvu_mbox_handler_CGX_STOP_RXTX(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stop_rxtx(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_rxtx(rvu, req->hdr.pcifunc, false); return 0; } -int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stats(struct rvu *rvu, struct msg_req *req, struct cgx_stats_rsp *rsp) { int pf = rvu_get_pf(req->hdr.pcifunc); @@ -354,7 +403,7 @@ int rvu_mbox_handler_CGX_STATS(struct rvu *rvu, struct msg_req *req, return 0; } -int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, +int rvu_mbox_handler_cgx_mac_addr_set(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { @@ -368,7 +417,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_SET(struct rvu *rvu, return 0; } -int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, +int rvu_mbox_handler_cgx_mac_addr_get(struct rvu *rvu, struct cgx_mac_addr_set_or_get *req, struct cgx_mac_addr_set_or_get *rsp) { @@ -387,7 +436,7 @@ int rvu_mbox_handler_CGX_MAC_ADDR_GET(struct rvu *rvu, return 0; } -int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_promisc_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp 
*rsp) { u16 pcifunc = req->hdr.pcifunc; @@ -407,7 +456,7 @@ int rvu_mbox_handler_CGX_PROMISC_ENABLE(struct rvu *rvu, struct msg_req *req, return 0; } -int rvu_mbox_handler_CGX_PROMISC_DISABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_promisc_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { u16 pcifunc = req->hdr.pcifunc; @@ -451,21 +500,21 @@ static int rvu_cgx_config_linkevents(struct rvu *rvu, u16 pcifunc, bool en) return 0; } -int rvu_mbox_handler_CGX_START_LINKEVENTS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_start_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, true); return 0; } -int rvu_mbox_handler_CGX_STOP_LINKEVENTS(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_stop_linkevents(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_linkevents(rvu, req->hdr.pcifunc, false); return 0; } -int rvu_mbox_handler_CGX_GET_LINKINFO(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_get_linkinfo(struct rvu *rvu, struct msg_req *req, struct cgx_link_info_msg *rsp) { u8 cgx_id, lmac_id; @@ -500,14 +549,14 @@ static int rvu_cgx_config_intlbk(struct rvu *rvu, u16 pcifunc, bool en) lmac_id, en); } -int rvu_mbox_handler_CGX_INTLBK_ENABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_intlbk_enable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, true); return 0; } -int rvu_mbox_handler_CGX_INTLBK_DISABLE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_cgx_intlbk_disable(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { rvu_cgx_config_intlbk(rvu, req->hdr.pcifunc, false); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index a5ab7eff2301..4a7609fd6dd0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -43,6 +43,19 @@ enum mc_buf_cnt { MC_BUF_CNT_2048, }; +enum nix_makr_fmt_indexes { + NIX_MARK_CFG_IP_DSCP_RED, + NIX_MARK_CFG_IP_DSCP_YELLOW, + NIX_MARK_CFG_IP_DSCP_YELLOW_RED, + NIX_MARK_CFG_IP_ECN_RED, + NIX_MARK_CFG_IP_ECN_YELLOW, + NIX_MARK_CFG_IP_ECN_YELLOW_RED, + NIX_MARK_CFG_VLAN_DEI_RED, + NIX_MARK_CFG_VLAN_DEI_YELLOW, + NIX_MARK_CFG_VLAN_DEI_YELLOW_RED, + NIX_MARK_CFG_MAX, +}; + /* For now considering MC resources needed for broadcast * pkt replication only. i.e 256 HWVFs + 12 PFs. */ @@ -55,6 +68,17 @@ struct mce { u16 pcifunc; }; +bool is_nixlf_attached(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return false; + return true; +} + int rvu_get_nixlf_count(struct rvu *rvu) { struct rvu_block *block; @@ -94,11 +118,29 @@ static inline struct nix_hw *get_nix_hw(struct rvu_hwinfo *hw, int blkaddr) return NULL; } +static void nix_rx_sync(struct rvu *rvu, int blkaddr) +{ + int err; + + /*Sync all in flight RX packets to LLC/DRAM */ + rvu_write64(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0)); + err = rvu_poll_reg(rvu, blkaddr, NIX_AF_RX_SW_SYNC, BIT_ULL(0), true); + if (err) + dev_err(rvu->dev, "NIX RX software sync failed\n"); + + /* As per a HW errata in 9xxx A0 silicon, HW may clear SW_SYNC[ENA] + * bit too early. Hence wait for 50us more. 
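So the sync sequence is: set SW_SYNC[ENA], poll until hardware clears it, then pad with an extra 50us on A0 silicon because the bit can clear early. A compact sketch of that pattern with stubbed register accessors (the stubs below are placeholders, not driver API):

#include <stdbool.h>
#include <stdio.h>

/* Stubs standing in for the real register poll and delay primitives. */
static bool sw_sync_ena_clear(void) { static int polls; return ++polls > 3; }
static void delay_us(int lo, int hi) { (void)lo; (void)hi; }

static void rx_sync(bool is_9xxx_a0)
{
	int retries = 1000;

	/* kick the sync, then wait for HW to drop the ENA bit */
	while (!sw_sync_ena_clear() && --retries)
		;
	if (!retries) {
		printf("NIX RX software sync failed\n");
		return;
	}

	if (is_9xxx_a0)	/* errata: ENA may clear ~50us too early */
		delay_us(50, 60);
}

int main(void)
{
	rx_sync(true);
	printf("rx path quiesced\n");
	return 0;
}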
+ */ + if (is_rvu_9xxx_A0(rvu)) + usleep_range(50, 60); +} + static bool is_valid_txschq(struct rvu *rvu, int blkaddr, int lvl, u16 pcifunc, u16 schq) { struct nix_txsch *txsch; struct nix_hw *nix_hw; + u16 map_func; nix_hw = get_nix_hw(rvu->hw, blkaddr); if (!nix_hw) @@ -109,12 +151,19 @@ static bool is_valid_txschq(struct rvu *rvu, int blkaddr, if (schq >= txsch->schq.max) return false; - spin_lock(&rvu->rsrc_lock); - if (txsch->pfvf_map[schq] != pcifunc) { - spin_unlock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); + map_func = TXSCH_MAP_FUNC(txsch->pfvf_map[schq]); + mutex_unlock(&rvu->rsrc_lock); + + /* For TL1 schq, sharing across VF's of same PF is ok */ + if (lvl == NIX_TXSCH_LVL_TL1 && + rvu_get_pf(map_func) != rvu_get_pf(pcifunc)) return false; - } - spin_unlock(&rvu->rsrc_lock); + + if (lvl != NIX_TXSCH_LVL_TL1 && + map_func != pcifunc) + return false; + return true; } @@ -122,7 +171,7 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); u8 cgx_id, lmac_id; - int pkind, pf; + int pkind, pf, vf; int err; pf = rvu_get_pf(pcifunc); @@ -148,6 +197,14 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_npc_set_pkind(rvu, pkind, pfvf); break; case NIX_INTF_TYPE_LBK: + vf = (pcifunc & RVU_PFVF_FUNC_MASK) - 1; + pfvf->rx_chan_base = NIX_CHAN_LBK_CHX(0, vf); + pfvf->tx_chan_base = vf & 0x1 ? NIX_CHAN_LBK_CHX(0, vf - 1) : + NIX_CHAN_LBK_CHX(0, vf + 1); + pfvf->rx_chan_cnt = 1; + pfvf->tx_chan_cnt = 1; + rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, + pfvf->rx_chan_base, false); break; } @@ -168,14 +225,21 @@ static int nix_interface_init(struct rvu *rvu, u16 pcifunc, int type, int nixlf) rvu_npc_install_bcast_match_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base); + pfvf->maxlen = NIC_HW_MIN_FRS; + pfvf->minlen = NIC_HW_MIN_FRS; return 0; } static void nix_interface_deinit(struct rvu *rvu, u16 pcifunc, u8 nixlf) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int err; + pfvf->maxlen = 0; + pfvf->minlen = 0; + pfvf->rxvlan = false; + /* Remove this PF_FUNC from bcast pkt replication list */ err = nix_update_bcast_mce_list(rvu, pcifunc, false); if (err) { @@ -234,17 +298,21 @@ static void nix_setup_lso_tso_l4(struct rvu *rvu, int blkaddr, /* TCP's flags field */ field.layer = NIX_TXLAYER_OL4; field.offset = 12; - field.sizem1 = 0; /* not needed */ + field.sizem1 = 1; /* 2 bytes */ field.alg = NIX_LSOALG_TCP_FLAGS; rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(format, (*fidx)++), *(u64 *)&field); } -static void nix_setup_lso(struct rvu *rvu, int blkaddr) +static void nix_setup_lso(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) { u64 cfg, idx, fidx = 0; + /* Get max HW supported format indices */ + cfg = (rvu_read64(rvu, blkaddr, NIX_AF_CONST1) >> 48) & 0xFF; + nix_hw->lso.total = cfg; + /* Enable LSO */ cfg = rvu_read64(rvu, blkaddr, NIX_AF_LSO_CFG); /* For TSO, set first and middle segment flags to @@ -254,7 +322,10 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr) cfg |= (0xFFF2ULL << 32) | (0xFFF2ULL << 16); rvu_write64(rvu, blkaddr, NIX_AF_LSO_CFG, cfg | BIT_ULL(63)); - /* Configure format fields for TCPv4 segmentation offload */ + /* Setup default static LSO formats + * + * Configure format fields for TCPv4 segmentation offload + */ idx = NIX_LSO_FORMAT_IDX_TSOV4; nix_setup_lso_tso_l3(rvu, blkaddr, idx, true, &fidx); nix_setup_lso_tso_l4(rvu, blkaddr, idx, &fidx); @@ -264,6 +335,7 @@ static void nix_setup_lso(struct rvu *rvu, int 
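/* (Backing up to the NIX_INTF_TYPE_LBK hunk above: the even/odd channel
 * swap cross-wires AF-VF pairs so one VF's TX channel is its
 * neighbour's RX channel. A worked reading, assuming
 * NIX_CHAN_LBK_CHX(0, n) simply encodes channel n on LBK0:
 *
 *	vf0: rx = CH(0), tx = CH(1)
 *	vf1: rx = CH(1), tx = CH(0)
 *	vf2: rx = CH(2), tx = CH(3)
 *	vf3: rx = CH(3), tx = CH(2)
 *
 * i.e. consecutive AF VFs form back-to-back loopback pairs.)
 */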
blkaddr) rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); } + nix_hw->lso.in_use++; /* Configure format fields for TCPv6 segmentation offload */ idx = NIX_LSO_FORMAT_IDX_TSOV6; @@ -276,6 +348,7 @@ static void nix_setup_lso(struct rvu *rvu, int blkaddr) rvu_write64(rvu, blkaddr, NIX_AF_LSO_FORMATX_FIELDX(idx, fidx), 0x0ULL); } + nix_hw->lso.in_use++; } static void nix_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) @@ -388,9 +461,8 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, bool ena; u64 cfg; - pfvf = rvu_get_pfvf(rvu, pcifunc); blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); - if (!pfvf->nixlf || blkaddr < 0) + if (blkaddr < 0) return NIX_AF_ERR_AF_LF_INVALID; block = &hw->block[blkaddr]; @@ -400,9 +472,14 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, return NIX_AF_ERR_AQ_ENQUEUE; } + pfvf = rvu_get_pfvf(rvu, pcifunc); nixlf = rvu_get_lf(rvu, block, pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + + /* Skip NIXLF check for broadcast MCE entry init */ + if (!(!rsp && req->ctype == NIX_AQ_CTYPE_MCE)) { + if (!pfvf->nixlf || nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + } switch (req->ctype) { case NIX_AQ_CTYPE_RQ: @@ -447,7 +524,9 @@ static int rvu_nix_aq_enq_inst(struct rvu *rvu, struct nix_aq_enq_req *req, /* Check if SQ pointed SMQ belongs to this PF/VF or not */ if (req->ctype == NIX_AQ_CTYPE_SQ && - req->op != NIX_AQ_INSTOP_WRITE) { + ((req->op == NIX_AQ_INSTOP_INIT && req->sq.ena) || + (req->op == NIX_AQ_INSTOP_WRITE && + req->sq_mask.ena && req->sq_mask.smq && req->sq.ena))) { if (!is_valid_txschq(rvu, blkaddr, NIX_TXSCH_LVL_SMQ, pcifunc, req->sq.smq)) return NIX_AF_ERR_AQ_ENQUEUE; @@ -637,25 +716,25 @@ static int nix_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) return err; } -int rvu_mbox_handler_NIX_AQ_ENQ(struct rvu *rvu, +int rvu_mbox_handler_nix_aq_enq(struct rvu *rvu, struct nix_aq_enq_req *req, struct nix_aq_enq_rsp *rsp) { return rvu_nix_aq_enq_inst(rvu, req, rsp); } -int rvu_mbox_handler_NIX_HWCTX_DISABLE(struct rvu *rvu, +int rvu_mbox_handler_nix_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) { return nix_lf_hwctx_disable(rvu, req); } -int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_nix_lf_alloc(struct rvu *rvu, struct nix_lf_alloc_req *req, struct nix_lf_alloc_rsp *rsp) { - int nixlf, qints, hwctx_size, err, rc = 0; + int nixlf, qints, hwctx_size, intf, err, rc = 0; struct rvu_hwinfo *hw = rvu->hw; u16 pcifunc = req->hdr.pcifunc; struct rvu_block *block; @@ -676,6 +755,24 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, if (nixlf < 0) return NIX_AF_ERR_AF_LF_INVALID; + /* Check if requested 'NIXLF <=> NPALF' mapping is valid */ + if (req->npa_func) { + /* If default, use 'this' NIXLF's PFFUNC */ + if (req->npa_func == RVU_DEFAULT_PF_FUNC) + req->npa_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->npa_func, BLKTYPE_NPA)) + return NIX_AF_INVAL_NPA_PF_FUNC; + } + + /* Check if requested 'NIXLF <=> SSOLF' mapping is valid */ + if (req->sso_func) { + /* If default, use 'this' NIXLF's PFFUNC */ + if (req->sso_func == RVU_DEFAULT_PF_FUNC) + req->sso_func = pcifunc; + if (!is_pffunc_map_valid(rvu, req->sso_func, BLKTYPE_SSO)) + return NIX_AF_INVAL_SSO_PF_FUNC; + } + /* If RSS is being enabled, check if requested config is valid. 
* RSS table size should be power of two, otherwise * RSS_GRP::OFFSET + adder might go beyond that group or @@ -777,21 +874,20 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, (u64)pfvf->nix_qints_ctx->iova); rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_CFG(nixlf), BIT_ULL(36)); + /* Setup VLANX TPID's. + * Use VLAN1 for 802.1Q + * and VLAN0 for 802.1AD. + */ + cfg = (0x8100ULL << 16) | 0x88A8ULL; + rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG(nixlf), cfg); + /* Enable LMTST for this NIX LF */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_TX_CFG2(nixlf), BIT_ULL(0)); - /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC - * If requester has sent a 'RVU_DEFAULT_PF_FUNC' use this NIX LF's - * PCIFUNC itself. - */ - if (req->npa_func == RVU_DEFAULT_PF_FUNC) - cfg = pcifunc; - else + /* Set CQE/WQE size, NPA_PF_FUNC for SQBs and also SSO_PF_FUNC */ + if (req->npa_func) cfg = req->npa_func; - - if (req->sso_func == RVU_DEFAULT_PF_FUNC) - cfg |= (u64)pcifunc << 16; - else + if (req->sso_func) cfg |= (u64)req->sso_func << 16; cfg |= (u64)req->xqe_sz << 33; @@ -800,10 +896,14 @@ int rvu_mbox_handler_NIX_LF_ALLOC(struct rvu *rvu, /* Config Rx pkt length, csum checks and apad enable / disable */ rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), req->rx_cfg); - err = nix_interface_init(rvu, pcifunc, NIX_INTF_TYPE_CGX, nixlf); + intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + err = nix_interface_init(rvu, pcifunc, intf, nixlf); if (err) goto free_mem; + /* Disable NPC entries as NIXLF's contexts are not initialized yet */ + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + goto exit; free_mem: @@ -823,10 +923,18 @@ exit: rsp->tx_chan_cnt = pfvf->tx_chan_cnt; rsp->lso_tsov4_idx = NIX_LSO_FORMAT_IDX_TSOV4; rsp->lso_tsov6_idx = NIX_LSO_FORMAT_IDX_TSOV6; + /* Get HW supported stat count */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST1); + rsp->lf_rx_stats = ((cfg >> 32) & 0xFF); + rsp->lf_tx_stats = ((cfg >> 24) & 0xFF); + /* Get count of CQ IRQs and error IRQs supported per LF */ + cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2); + rsp->qints = ((cfg >> 12) & 0xFFF); + rsp->cints = ((cfg >> 24) & 0xFFF); return rc; } -int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_nix_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -860,6 +968,41 @@ int rvu_mbox_handler_NIX_LF_FREE(struct rvu *rvu, struct msg_req *req, return 0; } +int rvu_mbox_handler_nix_mark_format_cfg(struct rvu *rvu, + struct nix_mark_format_cfg *req, + struct nix_mark_format_cfg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + int blkaddr, rc; + u32 cfg; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + cfg = (((u32)req->offset & 0x7) << 16) | + (((u32)req->y_mask & 0xF) << 12) | + (((u32)req->y_val & 0xF) << 8) | + (((u32)req->r_mask & 0xF) << 4) | ((u32)req->r_val & 0xF); + + rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfg); + if (rc < 0) { + dev_err(rvu->dev, "No mark_format_ctl for (pf:%d, vf:%d)", + rvu_get_pf(pcifunc), pcifunc & RVU_PFVF_FUNC_MASK); + return NIX_AF_ERR_MARK_CFG_FAIL; + } + + rsp->mark_format_idx = rc; + return 0; +} + /* Disable shaping of pkts by a scheduler queue * at a given scheduler level. 
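 *
 * (Stepping back to rvu_mbox_handler_nix_mark_format_cfg() above: the
 * cfg word packs offset into bits [18:16], y_mask into [15:12], y_val
 * into [11:8], r_mask into [7:4] and r_val into [3:0]. Under that
 * packing the canned NIX_MARK_CFG_IP_DSCP_RED value 0x10003 from
 * nix_af_mark_format_setup() below decodes as offset = 1 with
 * r_val = 3 and everything else zero -- only the RED marking value,
 * one byte into the header.)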
*/ @@ -918,7 +1061,74 @@ static void nix_reset_tx_linkcfg(struct rvu *rvu, int blkaddr, NIX_AF_TL3_TL2X_LINKX_CFG(schq, link), 0x00); } -int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, +static int +rvu_get_tl1_schqs(struct rvu *rvu, int blkaddr, u16 pcifunc, + u16 *schq_list, u16 *schq_cnt) +{ + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + u8 cgx_id, lmac_id; + u16 schq_base; + u32 *pfvf_map; + int pf, intf; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -ENODEV; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_TL1]; + pfvf_map = txsch->pfvf_map; + pf = rvu_get_pf(pcifunc); + + /* static allocation as two TL1s per link */ + intf = is_afvf(pcifunc) ? NIX_INTF_TYPE_LBK : NIX_INTF_TYPE_CGX; + + switch (intf) { + case NIX_INTF_TYPE_CGX: + rvu_get_cgx_lmac_id(pfvf->cgx_lmac, &cgx_id, &lmac_id); + schq_base = (cgx_id * MAX_LMAC_PER_CGX + lmac_id) * 2; + break; + case NIX_INTF_TYPE_LBK: + schq_base = rvu->cgx_cnt_max * MAX_LMAC_PER_CGX * 2; + break; + default: + return -ENODEV; + } + + if (schq_base + 1 > txsch->schq.max) + return -ENODEV; + + /* init pfvf_map as we store flags */ + if (pfvf_map[schq_base] == U32_MAX) { + pfvf_map[schq_base] = + TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0); + pfvf_map[schq_base + 1] = + TXSCH_MAP((pf << RVU_PFVF_PF_SHIFT), 0); + + /* One-time reset for TL1 */ + nix_reset_tx_linkcfg(rvu, blkaddr, + NIX_TXSCH_LVL_TL1, schq_base); + nix_reset_tx_shaping(rvu, blkaddr, + NIX_TXSCH_LVL_TL1, schq_base); + + nix_reset_tx_linkcfg(rvu, blkaddr, + NIX_TXSCH_LVL_TL1, schq_base + 1); + nix_reset_tx_shaping(rvu, blkaddr, + NIX_TXSCH_LVL_TL1, schq_base + 1); + } + + if (schq_list && schq_cnt) { + schq_list[0] = schq_base; + schq_list[1] = schq_base + 1; + *schq_cnt = 2; + } + + return 0; +} + +int rvu_mbox_handler_nix_txsch_alloc(struct rvu *rvu, struct nix_txsch_alloc_req *req, struct nix_txsch_alloc_rsp *rsp) { @@ -928,6 +1138,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, struct rvu_pfvf *pfvf; struct nix_hw *nix_hw; int blkaddr, rc = 0; + u32 *pfvf_map; u16 schq; pfvf = rvu_get_pfvf(rvu, pcifunc); @@ -939,17 +1150,27 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, if (!nix_hw) return -EINVAL; - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; req_schq = req->schq_contig[lvl] + req->schq[lvl]; + pfvf_map = txsch->pfvf_map; + + if (!req_schq) + continue; /* There are only 28 TL1s */ - if (lvl == NIX_TXSCH_LVL_TL1 && req_schq > txsch->schq.max) - goto err; + if (lvl == NIX_TXSCH_LVL_TL1) { + if (req->schq_contig[lvl] || + req->schq[lvl] > 2 || + rvu_get_tl1_schqs(rvu, blkaddr, + pcifunc, NULL, NULL)) + goto err; + continue; + } /* Check if request is valid */ - if (!req_schq || req_schq > MAX_TXSCHQ_PER_FUNC) + if (req_schq > MAX_TXSCHQ_PER_FUNC) goto err; /* If contiguous queues are needed, check for availability */ @@ -965,16 +1186,32 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { txsch = &nix_hw->txsch[lvl]; rsp->schq_contig[lvl] = req->schq_contig[lvl]; + pfvf_map = txsch->pfvf_map; rsp->schq[lvl] = req->schq[lvl]; - schq = 0; + if (!req->schq[lvl] && !req->schq_contig[lvl]) + continue; + + /* Handle TL1 specially as its + * allocation is restricted to 2 TL1s + * per link + */ + + if (lvl == NIX_TXSCH_LVL_TL1) { + rsp->schq_contig[lvl] = 0; + rvu_get_tl1_schqs(rvu, blkaddr, pcifunc, + &rsp->schq_list[lvl][0], + 
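/* (Worked example of the static TL1 mapping in rvu_get_tl1_schqs()
 * above, assuming MAX_LMAC_PER_CGX == 4: CGX1/LMAC2 gets
 * schq_base = (1 * 4 + 2) * 2 = 12, i.e. the TL1 pair {12, 13}, and
 * LBK users start past all CGX pairs at cgx_cnt_max * 4 * 2. The
 * pfvf_map init and one-time reset run only while the slot still reads
 * U32_MAX, so repeated requests for the same link are idempotent.)
 */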
&rsp->schq[lvl]); + continue; + } + /* Alloc contiguous queues first */ if (req->schq_contig[lvl]) { schq = rvu_alloc_rsrc_contig(&txsch->schq, req->schq_contig[lvl]); for (idx = 0; idx < req->schq_contig[lvl]; idx++) { - txsch->pfvf_map[schq] = pcifunc; + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); rsp->schq_contig_list[lvl][idx] = schq; @@ -985,7 +1222,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, /* Alloc non-contiguous queues */ for (idx = 0; idx < req->schq[lvl]; idx++) { schq = rvu_alloc_rsrc(&txsch->schq); - txsch->pfvf_map[schq] = pcifunc; + pfvf_map[schq] = TXSCH_MAP(pcifunc, 0); nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); nix_reset_tx_shaping(rvu, blkaddr, lvl, schq); rsp->schq_list[lvl][idx] = schq; @@ -995,7 +1232,7 @@ int rvu_mbox_handler_NIX_TXSCH_ALLOC(struct rvu *rvu, err: rc = NIX_AF_ERR_TLX_ALLOC_FAIL; exit: - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); return rc; } @@ -1020,14 +1257,14 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) return NIX_AF_ERR_AF_LF_INVALID; /* Disable TL2/3 queue links before SMQ flush*/ - spin_lock(&rvu->rsrc_lock); + mutex_lock(&rvu->rsrc_lock); for (lvl = NIX_TXSCH_LVL_TL4; lvl < NIX_TXSCH_LVL_CNT; lvl++) { if (lvl != NIX_TXSCH_LVL_TL2 && lvl != NIX_TXSCH_LVL_TL4) continue; txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { - if (txsch->pfvf_map[schq] != pcifunc) + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; nix_reset_tx_linkcfg(rvu, blkaddr, lvl, schq); } @@ -1036,7 +1273,7 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) /* Flush SMQs */ txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; for (schq = 0; schq < txsch->schq.max; schq++) { - if (txsch->pfvf_map[schq] != pcifunc) + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); /* Do SMQ flush and set enqueue xoff */ @@ -1054,15 +1291,21 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) /* Now free scheduler queues to free pool */ for (lvl = 0; lvl < NIX_TXSCH_LVL_CNT; lvl++) { + /* Free all SCHQ's except TL1 as + * TL1 is shared across all VF's for a RVU PF + */ + if (lvl == NIX_TXSCH_LVL_TL1) + continue; + txsch = &nix_hw->txsch[lvl]; for (schq = 0; schq < txsch->schq.max; schq++) { - if (txsch->pfvf_map[schq] != pcifunc) + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) continue; rvu_free_rsrc(&txsch->schq, schq); txsch->pfvf_map[schq] = 0; } } - spin_unlock(&rvu->rsrc_lock); + mutex_unlock(&rvu->rsrc_lock); /* Sync cached info for this LF in NDC-TX to LLC/DRAM */ rvu_write64(rvu, blkaddr, NIX_AF_NDC_TX_SYNC, BIT_ULL(12) | nixlf); @@ -1073,11 +1316,81 @@ static int nix_txschq_free(struct rvu *rvu, u16 pcifunc) return 0; } -int rvu_mbox_handler_NIX_TXSCH_FREE(struct rvu *rvu, +static int nix_txschq_free_one(struct rvu *rvu, + struct nix_txsch_free_req *req) +{ + int lvl, schq, nixlf, blkaddr, rc; + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct nix_txsch *txsch; + struct nix_hw *nix_hw; + u32 *pfvf_map; + u64 cfg; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + lvl = req->schq_lvl; + schq = req->schq; + txsch = &nix_hw->txsch[lvl]; + + /* 
Don't allow freeing TL1 */ + if (lvl > NIX_TXSCH_LVL_TL2 || + schq >= txsch->schq.max) + goto err; + + pfvf_map = txsch->pfvf_map; + mutex_lock(&rvu->rsrc_lock); + + if (TXSCH_MAP_FUNC(pfvf_map[schq]) != pcifunc) { + mutex_unlock(&rvu->rsrc_lock); + goto err; + } + + /* Flush if it is a SMQ. Onus of disabling + * TL2/3 queue links before SMQ flush is on user + */ + if (lvl == NIX_TXSCH_LVL_SMQ) { + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); + /* Do SMQ flush and set enqueue xoff */ + cfg |= BIT_ULL(50) | BIT_ULL(49); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); + + /* Wait for flush to complete */ + rc = rvu_poll_reg(rvu, blkaddr, + NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true); + if (rc) { + dev_err(rvu->dev, + "NIXLF%d: SMQ%d flush failed\n", nixlf, schq); + } + } + + /* Free the resource */ + rvu_free_rsrc(&txsch->schq, schq); + txsch->pfvf_map[schq] = 0; + mutex_unlock(&rvu->rsrc_lock); + return 0; +err: + return NIX_AF_ERR_TLX_INVALID; +} + +int rvu_mbox_handler_nix_txsch_free(struct rvu *rvu, struct nix_txsch_free_req *req, struct msg_rsp *rsp) { - return nix_txschq_free(rvu, req->hdr.pcifunc); + if (req->flags & TXSCHQ_FREE_ALL) + return nix_txschq_free(rvu, req->hdr.pcifunc); + else + return nix_txschq_free_one(rvu, req); } static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, @@ -1118,16 +1431,73 @@ static bool is_txschq_config_valid(struct rvu *rvu, u16 pcifunc, int blkaddr, return true; } -int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, +static int +nix_tl1_default_cfg(struct rvu *rvu, u16 pcifunc) +{ + u16 schq_list[2], schq_cnt, schq; + int blkaddr, idx, err = 0; + u16 map_func, map_flags; + struct nix_hw *nix_hw; + u64 reg, regval; + u32 *pfvf_map; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + pfvf_map = nix_hw->txsch[NIX_TXSCH_LVL_TL1].pfvf_map; + + mutex_lock(&rvu->rsrc_lock); + + err = rvu_get_tl1_schqs(rvu, blkaddr, + pcifunc, schq_list, &schq_cnt); + if (err) + goto unlock; + + for (idx = 0; idx < schq_cnt; idx++) { + schq = schq_list[idx]; + map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); + map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); + + /* check if config is already done or this is pf */ + if (map_flags & NIX_TXSCHQ_TL1_CFG_DONE) + continue; + + /* default configuration */ + reg = NIX_AF_TL1X_TOPOLOGY(schq); + regval = (TXSCH_TL1_DFLT_RR_PRIO << 1); + rvu_write64(rvu, blkaddr, reg, regval); + reg = NIX_AF_TL1X_SCHEDULE(schq); + regval = TXSCH_TL1_DFLT_RR_QTM; + rvu_write64(rvu, blkaddr, reg, regval); + reg = NIX_AF_TL1X_CIR(schq); + regval = 0; + rvu_write64(rvu, blkaddr, reg, regval); + + map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; + pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); + } +unlock: + mutex_unlock(&rvu->rsrc_lock); + return err; +} + +int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) { + u16 schq, pcifunc = req->hdr.pcifunc; struct rvu_hwinfo *hw = rvu->hw; - u16 pcifunc = req->hdr.pcifunc; u64 reg, regval, schq_regbase; struct nix_txsch *txsch; + u16 map_func, map_flags; struct nix_hw *nix_hw; int blkaddr, idx, err; + u32 *pfvf_map; int nixlf; if (req->lvl >= NIX_TXSCH_LVL_CNT || @@ -1147,6 +1517,16 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, return NIX_AF_ERR_AF_LF_INVALID; txsch = &nix_hw->txsch[req->lvl]; + pfvf_map = txsch->pfvf_map; + + /* VF is only allowed to trigger + * setting default cfg on TL1 + 
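 *
 * (An aside on nix_txschq_free_one() above: it reuses the SMQ flush
 * recipe this file applies everywhere -- set NIX_AF_SMQX_CFG bit 50
 * (enqueue xoff) plus bit 49 (flush), then poll until bit 49 clears:
 *
 *	cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq));
 *	cfg |= BIT_ULL(50) | BIT_ULL(49);
 *	rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg);
 *	rvu_poll_reg(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), BIT_ULL(49), true);
 *
 * with the bit meanings read off this patch rather than the HRM.)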
*/ + if (pcifunc & RVU_PFVF_FUNC_MASK && + req->lvl == NIX_TXSCH_LVL_TL1) { + return nix_tl1_default_cfg(rvu, pcifunc); + } + for (idx = 0; idx < req->num_regs; idx++) { reg = req->reg[idx]; regval = req->regval[idx]; @@ -1164,6 +1544,21 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, regval |= ((u64)nixlf << 24); } + /* Mark config as done for TL1 by PF */ + if (schq_regbase >= NIX_AF_TL1X_SCHEDULE(0) && + schq_regbase <= NIX_AF_TL1X_GREEN_BYTES(0)) { + schq = TXSCHQ_IDX(reg, TXSCHQ_IDX_SHIFT); + + mutex_lock(&rvu->rsrc_lock); + + map_func = TXSCH_MAP_FUNC(pfvf_map[schq]); + map_flags = TXSCH_MAP_FLAGS(pfvf_map[schq]); + + map_flags |= NIX_TXSCHQ_TL1_CFG_DONE; + pfvf_map[schq] = TXSCH_MAP(map_func, map_flags); + mutex_unlock(&rvu->rsrc_lock); + } + rvu_write64(rvu, blkaddr, reg, regval); /* Check for SMQ flush, if so, poll for its completion */ @@ -1181,35 +1576,22 @@ int rvu_mbox_handler_NIX_TXSCHQ_CFG(struct rvu *rvu, static int nix_rx_vtag_cfg(struct rvu *rvu, int nixlf, int blkaddr, struct nix_vtag_config *req) { - u64 regval = 0; - -#define NIX_VTAGTYPE_MAX 0x8ull -#define NIX_VTAGSIZE_MASK 0x7ull -#define NIX_VTAGSTRIP_CAP_MASK 0x30ull + u64 regval = req->vtag_size; - if (req->rx.vtag_type >= NIX_VTAGTYPE_MAX || - req->vtag_size > VTAGSIZE_T8) + if (req->rx.vtag_type > 7 || req->vtag_size > VTAGSIZE_T8) return -EINVAL; - regval = rvu_read64(rvu, blkaddr, - NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type)); - - if (req->rx.strip_vtag && req->rx.capture_vtag) - regval |= BIT_ULL(4) | BIT_ULL(5); - else if (req->rx.strip_vtag) + if (req->rx.capture_vtag) + regval |= BIT_ULL(5); + if (req->rx.strip_vtag) regval |= BIT_ULL(4); - else - regval &= ~(BIT_ULL(4) | BIT_ULL(5)); - - regval &= ~NIX_VTAGSIZE_MASK; - regval |= req->vtag_size & NIX_VTAGSIZE_MASK; rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_VTAG_TYPEX(nixlf, req->rx.vtag_type), regval); return 0; } -int rvu_mbox_handler_NIX_VTAG_CFG(struct rvu *rvu, +int rvu_mbox_handler_nix_vtag_cfg(struct rvu *rvu, struct nix_vtag_config *req, struct msg_rsp *rsp) { @@ -1243,7 +1625,7 @@ static int nix_setup_mce(struct rvu *rvu, int mce, u8 op, struct nix_aq_enq_req aq_req; int err; - aq_req.hdr.pcifunc = pcifunc; + aq_req.hdr.pcifunc = 0; aq_req.ctype = NIX_AQ_CTYPE_MCE; aq_req.op = op; aq_req.qidx = mce; @@ -1294,7 +1676,7 @@ static int nix_update_mce_list(struct nix_mce_list *mce_list, return 0; /* Add a new one to the list, at the tail */ - mce = kzalloc(sizeof(*mce), GFP_ATOMIC); + mce = kzalloc(sizeof(*mce), GFP_KERNEL); if (!mce) return -ENOMEM; mce->idx = idx; @@ -1317,6 +1699,10 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) struct rvu_pfvf *pfvf; int blkaddr; + /* Broadcast pkt replication is not needed for AF's VFs, hence skip */ + if (is_afvf(pcifunc)) + return 0; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); if (blkaddr < 0) return 0; @@ -1340,7 +1726,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) return -EINVAL; } - spin_lock(&mcast->mce_lock); + mutex_lock(&mcast->mce_lock); err = nix_update_mce_list(mce_list, pcifunc, idx, add); if (err) @@ -1370,7 +1756,7 @@ static int nix_update_bcast_mce_list(struct rvu *rvu, u16 pcifunc, bool add) } end: - spin_unlock(&mcast->mce_lock); + mutex_unlock(&mcast->mce_lock); return err; } @@ -1455,7 +1841,7 @@ static int nix_setup_mcast(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) BIT_ULL(63) | (mcast->replay_pkind << 24) | BIT_ULL(20) | MC_BUF_CNT); - spin_lock_init(&mcast->mce_lock); + 
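/* (Two asides on the hunks above. First, the rewritten nix_rx_vtag_cfg()
 * now composes NIX_AF_LFX_RX_VTAG_TYPEX from scratch --
 *
 *	regval = req->vtag_size;	-- size in the low bits
 *	regval |= BIT_ULL(5);		-- if rx.capture_vtag
 *	regval |= BIT_ULL(4);		-- if rx.strip_vtag
 *
 * so stale strip/capture bits left by an earlier user of that vtag type
 * cannot survive a read-modify-write. Second, the spinlock-to-mutex and
 * GFP_ATOMIC-to-GFP_KERNEL changes go together: the MCE list update
 * path is now allowed to sleep, which a spinlock would forbid.)
 */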
mutex_init(&mcast->mce_lock); return nix_setup_bcast_tables(rvu, nix_hw); } @@ -1499,14 +1885,66 @@ static int nix_setup_txschq(struct rvu *rvu, struct nix_hw *nix_hw, int blkaddr) * PF/VF pcifunc mapping info. */ txsch->pfvf_map = devm_kcalloc(rvu->dev, txsch->schq.max, - sizeof(u16), GFP_KERNEL); + sizeof(u32), GFP_KERNEL); if (!txsch->pfvf_map) return -ENOMEM; + memset(txsch->pfvf_map, U8_MAX, txsch->schq.max * sizeof(u32)); } return 0; } -int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, +int rvu_nix_reserve_mark_format(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr, u32 cfg) +{ + int fmt_idx; + + for (fmt_idx = 0; fmt_idx < nix_hw->mark_format.in_use; fmt_idx++) { + if (nix_hw->mark_format.cfg[fmt_idx] == cfg) + return fmt_idx; + } + if (fmt_idx >= nix_hw->mark_format.total) + return -ERANGE; + + rvu_write64(rvu, blkaddr, NIX_AF_MARK_FORMATX_CTL(fmt_idx), cfg); + nix_hw->mark_format.cfg[fmt_idx] = cfg; + nix_hw->mark_format.in_use++; + return fmt_idx; +} + +static int nix_af_mark_format_setup(struct rvu *rvu, struct nix_hw *nix_hw, + int blkaddr) +{ + u64 cfgs[] = { + [NIX_MARK_CFG_IP_DSCP_RED] = 0x10003, + [NIX_MARK_CFG_IP_DSCP_YELLOW] = 0x11200, + [NIX_MARK_CFG_IP_DSCP_YELLOW_RED] = 0x11203, + [NIX_MARK_CFG_IP_ECN_RED] = 0x6000c, + [NIX_MARK_CFG_IP_ECN_YELLOW] = 0x60c00, + [NIX_MARK_CFG_IP_ECN_YELLOW_RED] = 0x60c0c, + [NIX_MARK_CFG_VLAN_DEI_RED] = 0x30008, + [NIX_MARK_CFG_VLAN_DEI_YELLOW] = 0x30800, + [NIX_MARK_CFG_VLAN_DEI_YELLOW_RED] = 0x30808, + }; + int i, rc; + u64 total; + + total = (rvu_read64(rvu, blkaddr, NIX_AF_PSE_CONST) & 0xFF00) >> 8; + nix_hw->mark_format.total = (u8)total; + nix_hw->mark_format.cfg = devm_kcalloc(rvu->dev, total, sizeof(u32), + GFP_KERNEL); + if (!nix_hw->mark_format.cfg) + return -ENOMEM; + for (i = 0; i < NIX_MARK_CFG_MAX; i++) { + rc = rvu_nix_reserve_mark_format(rvu, nix_hw, blkaddr, cfgs[i]); + if (rc < 0) + dev_err(rvu->dev, "Err %d in setup mark format %d\n", + i, rc); + } + + return 0; +} + +int rvu_mbox_handler_nix_stats_rst(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -1537,190 +1975,287 @@ int rvu_mbox_handler_NIX_STATS_RST(struct rvu *rvu, struct msg_req *req, } /* Returns the ALG index to be set into NPC_RX_ACTION */ -static int get_flowkey_alg_idx(u32 flow_cfg) -{ - u32 ip_cfg; - - flow_cfg &= ~FLOW_KEY_TYPE_PORT; - ip_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6; - if (flow_cfg == ip_cfg) - return FLOW_KEY_ALG_IP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP)) - return FLOW_KEY_ALG_TCP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP)) - return FLOW_KEY_ALG_UDP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_SCTP)) - return FLOW_KEY_ALG_SCTP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP)) - return FLOW_KEY_ALG_TCP_UDP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP)) - return FLOW_KEY_ALG_TCP_SCTP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP)) - return FLOW_KEY_ALG_UDP_SCTP; - else if (flow_cfg == (ip_cfg | FLOW_KEY_TYPE_TCP | - FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP)) - return FLOW_KEY_ALG_TCP_UDP_SCTP; - - return FLOW_KEY_ALG_PORT; -} - -int rvu_mbox_handler_NIX_RSS_FLOWKEY_CFG(struct rvu *rvu, - struct nix_rss_flowkey_cfg *req, - struct msg_rsp *rsp) +static int get_flowkey_alg_idx(struct nix_hw *nix_hw, u32 flow_cfg) { - struct rvu_hwinfo *hw = rvu->hw; - u16 pcifunc = req->hdr.pcifunc; - int alg_idx, nixlf, blkaddr; - - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 
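/* (The pfvf_map widening above, u16 to u32, makes room for per-queue
 * flags beside the owning PF_FUNC. A hedged sketch of the accessors
 * this patch uses, whose real definitions live in rvu.h outside this
 * diff:
 *
 *	#define TXSCH_MAP(func, flags)	(((func) & 0xFFFF) | ((flags) << 16))
 *	#define TXSCH_MAP_FUNC(map)	((map) & 0xFFFF)
 *	#define TXSCH_MAP_FLAGS(map)	((map) >> 16)
 *
 * The U8_MAX memset leaves every slot reading back as U32_MAX, the
 * "unallocated" sentinel rvu_get_tl1_schqs() tests for.)
 */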
pcifunc); - if (blkaddr < 0) - return NIX_AF_ERR_AF_LF_INVALID; - - nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); - if (nixlf < 0) - return NIX_AF_ERR_AF_LF_INVALID; + int i; - alg_idx = get_flowkey_alg_idx(req->flowkey_cfg); + /* Scan over existing algo entries to find a match */ + for (i = 0; i < nix_hw->flowkey.in_use; i++) + if (nix_hw->flowkey.flowkey[i] == flow_cfg) + return i; - rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, - alg_idx, req->mcam_index); - return 0; + return -ERANGE; } -static void set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) +static int set_flowkey_fields(struct nix_rx_flowkey_alg *alg, u32 flow_cfg) { - struct nix_rx_flowkey_alg *field = NULL; - int idx, key_type; + int idx, nr_field, key_off, field_marker, keyoff_marker; + int max_key_off, max_bit_pos, group_member; + struct nix_rx_flowkey_alg *field; + struct nix_rx_flowkey_alg tmp; + u32 key_type, valid_key; if (!alg) - return; + return -EINVAL; - /* FIELD0: IPv4 - * FIELD1: IPv6 - * FIELD2: TCP/UDP/SCTP/ALL - * FIELD3: Unused - * FIELD4: Unused - * - * Each of the 32 possible flow key algorithm definitions should +#define FIELDS_PER_ALG 5 +#define MAX_KEY_OFF 40 + /* Clear all fields */ + memset(alg, 0, sizeof(uint64_t) * FIELDS_PER_ALG); + + /* Each of the 32 possible flow key algorithm definitions should * fall into the above incremental config (except ALG0). Otherwise a * single NPC MCAM entry is not sufficient for supporting RSS. * * If a different definition or combination is needed then the NPC MCAM * has to be programmed to filter such pkts and its action should * point to this definition to calculate flowtag or hash. + * + * The `for loop` goes over _all_ protocol fields and the following + * variables depict the state machine's forward progress logic. + * + * keyoff_marker - Enabled when hash byte length needs to be accounted + * in field->key_offset update. + * field_marker - Enabled when a new field needs to be selected. + * group_member - Enabled when protocol is part of a group. 
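 *
 * A worked trace of this machine, with offsets derived from the code
 * below rather than the HRM: flow_cfg = IPV4 | IPV6 | TCP | UDP yields
 *	field0: IPv4 SIP+DIP at key offset 0 (8 bytes)
 *	field1: IPv6 SIP+DIP at key offset 0 (32 bytes; it may share
 *		offset 0 with IPv4 since a packet is only ever one of
 *		the two)
 *	field2: TCP|UDP sport+dport at key offset 32 (4 bytes; the group
 *		mechanism folds both ltypes into one field instead of
 *		burning two of the five slots)
 * comfortably inside the MAX_KEY_OFF = 40 byte hash-key budget.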
*/ - for (idx = 0; idx < 32; idx++) { - key_type = flow_cfg & BIT_ULL(idx); - if (!key_type) - continue; + + keyoff_marker = 0; max_key_off = 0; group_member = 0; + nr_field = 0; key_off = 0; field_marker = 1; + field = &tmp; max_bit_pos = fls(flow_cfg); + for (idx = 0; + idx < max_bit_pos && nr_field < FIELDS_PER_ALG && + key_off < MAX_KEY_OFF; idx++) { + key_type = BIT(idx); + valid_key = flow_cfg & key_type; + /* Found a field marker, reset the field values */ + if (field_marker) + memset(&tmp, 0, sizeof(tmp)); + switch (key_type) { - case FLOW_KEY_TYPE_PORT: - field = &alg[0]; + case NIX_FLOW_KEY_TYPE_PORT: field->sel_chan = true; /* This should be set to 1, when SEL_CHAN is set */ field->bytesm1 = 1; + field_marker = true; + keyoff_marker = true; break; - case FLOW_KEY_TYPE_IPV4: - field = &alg[0]; + case NIX_FLOW_KEY_TYPE_IPV4: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP; field->hdr_offset = 12; /* SIP offset */ field->bytesm1 = 7; /* SIP + DIP, 8 bytes */ field->ltype_mask = 0xF; /* Match only IPv4 */ + field_marker = true; + keyoff_marker = false; break; - case FLOW_KEY_TYPE_IPV6: - field = &alg[1]; + case NIX_FLOW_KEY_TYPE_IPV6: field->lid = NPC_LID_LC; field->ltype_match = NPC_LT_LC_IP6; field->hdr_offset = 8; /* SIP offset */ field->bytesm1 = 31; /* SIP + DIP, 32 bytes */ field->ltype_mask = 0xF; /* Match only IPv6 */ + field_marker = true; + keyoff_marker = true; break; - case FLOW_KEY_TYPE_TCP: - case FLOW_KEY_TYPE_UDP: - case FLOW_KEY_TYPE_SCTP: - field = &alg[2]; + case NIX_FLOW_KEY_TYPE_TCP: + case NIX_FLOW_KEY_TYPE_UDP: + case NIX_FLOW_KEY_TYPE_SCTP: field->lid = NPC_LID_LD; field->bytesm1 = 3; /* Sport + Dport, 4 bytes */ - if (key_type == FLOW_KEY_TYPE_TCP) + if (key_type == NIX_FLOW_KEY_TYPE_TCP && valid_key) { field->ltype_match |= NPC_LT_LD_TCP; - else if (key_type == FLOW_KEY_TYPE_UDP) + group_member = true; + } else if (key_type == NIX_FLOW_KEY_TYPE_UDP && + valid_key) { field->ltype_match |= NPC_LT_LD_UDP; - else if (key_type == FLOW_KEY_TYPE_SCTP) + group_member = true; + } else if (key_type == NIX_FLOW_KEY_TYPE_SCTP && + valid_key) { field->ltype_match |= NPC_LT_LD_SCTP; - field->key_offset = 32; /* After IPv4/v6 SIP, DIP */ + group_member = true; + } field->ltype_mask = ~field->ltype_match; + if (key_type == NIX_FLOW_KEY_TYPE_SCTP) { + /* Handle the case where any of the group item + * is enabled in the group but not the final one + */ + if (group_member) { + valid_key = true; + group_member = false; + } + field_marker = true; + keyoff_marker = true; + } else { + field_marker = false; + keyoff_marker = false; + } break; } - if (field) - field->ena = 1; - field = NULL; + field->ena = 1; + + /* Found a valid flow key type */ + if (valid_key) { + field->key_offset = key_off; + memcpy(&alg[nr_field], field, sizeof(*field)); + max_key_off = max(max_key_off, field->bytesm1 + 1); + + /* Found a field marker, get the next field */ + if (field_marker) + nr_field++; + } + + /* Found a keyoff marker, update the new key_off */ + if (keyoff_marker) { + key_off += max_key_off; + max_key_off = 0; + } } + /* Processed all the flow key types */ + if (idx == max_bit_pos && key_off <= MAX_KEY_OFF) + return 0; + else + return NIX_AF_ERR_RSS_NOSPC_FIELD; } -static void nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) +static int reserve_flowkey_alg_idx(struct rvu *rvu, int blkaddr, u32 flow_cfg) { -#define FIELDS_PER_ALG 5 - u64 field[FLOW_KEY_ALG_MAX][FIELDS_PER_ALG]; - u32 flowkey_cfg, minkey_cfg; - int alg, fid; + u64 field[FIELDS_PER_ALG]; + struct nix_hw *hw; 
+ int fid, rc; - memset(&field, 0, sizeof(u64) * FLOW_KEY_ALG_MAX * FIELDS_PER_ALG); + hw = get_nix_hw(rvu->hw, blkaddr); + if (!hw) + return -EINVAL; - /* Only incoming channel number */ - flowkey_cfg = FLOW_KEY_TYPE_PORT; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_PORT], flowkey_cfg); + /* No room to add a new flow hash algorithm */ + if (hw->flowkey.in_use >= NIX_FLOW_KEY_ALG_MAX) + return NIX_AF_ERR_RSS_NOSPC_ALGO; - /* For a incoming pkt if none of the fields match then flowkey - * will be zero, hence tag generated will also be zero. - * RSS entry at rsse_index = NIX_AF_LF()_RSS_GRP()[OFFSET] will - * be used to queue the packet. - */ + /* Generate algo fields for the given flow_cfg */ + rc = set_flowkey_fields((struct nix_rx_flowkey_alg *)field, flow_cfg); + if (rc) + return rc; + + /* Update ALGX_FIELDX register with generated fields */ + for (fid = 0; fid < FIELDS_PER_ALG; fid++) + rvu_write64(rvu, blkaddr, + NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(hw->flowkey.in_use, + fid), field[fid]); + + /* Store the flow_cfg for further lookup */ + rc = hw->flowkey.in_use; + hw->flowkey.flowkey[rc] = flow_cfg; + hw->flowkey.in_use++; + + return rc; +} + +int rvu_mbox_handler_nix_rss_flowkey_cfg(struct rvu *rvu, + struct nix_rss_flowkey_cfg *req, + struct nix_rss_flowkey_cfg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int alg_idx, nixlf, blkaddr; + struct nix_hw *nix_hw; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + alg_idx = get_flowkey_alg_idx(nix_hw, req->flowkey_cfg); + /* Failed to get algo index from the existing list, reserve new */ + if (alg_idx < 0) { + alg_idx = reserve_flowkey_alg_idx(rvu, blkaddr, + req->flowkey_cfg); + if (alg_idx < 0) + return alg_idx; + } + rsp->alg_idx = alg_idx; + rvu_npc_update_flowkey_alg_idx(rvu, pcifunc, nixlf, req->group, + alg_idx, req->mcam_index); + return 0; +} + +static int nix_rx_flowkey_alg_cfg(struct rvu *rvu, int blkaddr) +{ + u32 flowkey_cfg, minkey_cfg; + int alg, fid, rc; + + /* Disable all flow key algx fieldx */ + for (alg = 0; alg < NIX_FLOW_KEY_ALG_MAX; alg++) { + for (fid = 0; fid < FIELDS_PER_ALG; fid++) + rvu_write64(rvu, blkaddr, + NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), + 0); + } /* IPv4/IPv6 SIP/DIPs */ - flowkey_cfg = FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_IP], flowkey_cfg); + flowkey_cfg = NIX_FLOW_KEY_TYPE_IPV4 | NIX_FLOW_KEY_TYPE_IPV6; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* TCPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ minkey_cfg = flowkey_cfg; - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP], flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* UDPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP], flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* SCTPv4/v6 4-tuple, SIP, DIP, Sport, Dport */ - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_SCTP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_SCTP], 
flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* TCP/UDP v4/v6 4-tuple, rest IP pkts 2-tuple */ - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_UDP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP], flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | + NIX_FLOW_KEY_TYPE_UDP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* TCP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | FLOW_KEY_TYPE_SCTP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_SCTP], flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | + NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_UDP_SCTP], flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_UDP | + NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; /* TCP/UDP/SCTP v4/v6 4-tuple, rest IP pkts 2-tuple */ - flowkey_cfg = minkey_cfg | FLOW_KEY_TYPE_TCP | - FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP; - set_flowkey_fields((void *)&field[FLOW_KEY_ALG_TCP_UDP_SCTP], - flowkey_cfg); + flowkey_cfg = minkey_cfg | NIX_FLOW_KEY_TYPE_TCP | + NIX_FLOW_KEY_TYPE_UDP | NIX_FLOW_KEY_TYPE_SCTP; + rc = reserve_flowkey_alg_idx(rvu, blkaddr, flowkey_cfg); + if (rc < 0) + return rc; - for (alg = 0; alg < FLOW_KEY_ALG_MAX; alg++) { - for (fid = 0; fid < FIELDS_PER_ALG; fid++) - rvu_write64(rvu, blkaddr, - NIX_AF_RX_FLOW_KEY_ALGX_FIELDX(alg, fid), - field[alg][fid]); - } + return 0; } -int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, +int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, struct nix_set_mac_addr *req, struct msg_rsp *rsp) { @@ -1742,10 +2277,13 @@ int rvu_mbox_handler_NIX_SET_MAC_ADDR(struct rvu *rvu, rvu_npc_install_ucast_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, req->mac_addr); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + return 0; } -int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, +int rvu_mbox_handler_nix_set_rx_mode(struct rvu *rvu, struct nix_rx_mode *req, struct msg_rsp *rsp) { bool allmulti = false, disable_promisc = false; @@ -1775,9 +2313,303 @@ int rvu_mbox_handler_NIX_SET_RX_MODE(struct rvu *rvu, struct nix_rx_mode *req, else rvu_npc_install_promisc_entry(rvu, pcifunc, nixlf, pfvf->rx_chan_base, allmulti); + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + + return 0; +} + +static void nix_find_link_frs(struct rvu *rvu, + struct nix_frs_cfg *req, u16 pcifunc) +{ + int pf = rvu_get_pf(pcifunc); + struct rvu_pfvf *pfvf; + int maxlen, minlen; + int numvfs, hwvf; + int vf; + + /* Update with requester's min/max lengths */ + pfvf = rvu_get_pfvf(rvu, pcifunc); + pfvf->maxlen = req->maxlen; + if (req->update_minlen) + pfvf->minlen = req->minlen; + + maxlen = req->maxlen; + minlen = req->update_minlen ? 
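/* (What nix_find_link_frs() computes: an RX link is shared by a PF and
 * all of its VFs, so the value actually programmed must cover everyone
 * on the link -- maxlen becomes the maximum, and minlen the smallest
 * nonzero minimum, across the PF and its VFs. E.g. a PF maxlen of 1518
 * with one VF at 9212 widens the request to 9212 before the
 * NIX_AF_RX_LINKX_CFG write.)
 */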
req->minlen : 0; + + /* Get this PF's numVFs and starting hwvf */ + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + + /* For each VF, compare requested max/minlen */ + for (vf = 0; vf < numvfs; vf++) { + pfvf = &rvu->hwvf[hwvf + vf]; + if (pfvf->maxlen > maxlen) + maxlen = pfvf->maxlen; + if (req->update_minlen && + pfvf->minlen && pfvf->minlen < minlen) + minlen = pfvf->minlen; + } + + /* Compare requested max/minlen with PF's max/minlen */ + pfvf = &rvu->pf[pf]; + if (pfvf->maxlen > maxlen) + maxlen = pfvf->maxlen; + if (req->update_minlen && + pfvf->minlen && pfvf->minlen < minlen) + minlen = pfvf->minlen; + + /* Update the request with max/min PF's and it's VF's max/min */ + req->maxlen = maxlen; + if (req->update_minlen) + req->minlen = minlen; +} + +int rvu_mbox_handler_nix_set_hw_frs(struct rvu *rvu, struct nix_frs_cfg *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + int pf = rvu_get_pf(pcifunc); + int blkaddr, schq, link = -1; + struct nix_txsch *txsch; + u64 cfg, lmac_fifo_len; + struct nix_hw *nix_hw; + u8 cgx = 0, lmac = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + if (!req->sdp_link && req->maxlen > NIC_HW_MAX_FRS) + return NIX_AF_ERR_FRS_INVALID; + + if (req->update_minlen && req->minlen < NIC_HW_MIN_FRS) + return NIX_AF_ERR_FRS_INVALID; + + /* Check if requester wants to update SMQ's */ + if (!req->update_smq) + goto rx_frscfg; + + /* Update min/maxlen in each of the SMQ attached to this PF/VF */ + txsch = &nix_hw->txsch[NIX_TXSCH_LVL_SMQ]; + mutex_lock(&rvu->rsrc_lock); + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + cfg = rvu_read64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq)); + cfg = (cfg & ~(0xFFFFULL << 8)) | ((u64)req->maxlen << 8); + if (req->update_minlen) + cfg = (cfg & ~0x7FULL) | ((u64)req->minlen & 0x7F); + rvu_write64(rvu, blkaddr, NIX_AF_SMQX_CFG(schq), cfg); + } + mutex_unlock(&rvu->rsrc_lock); + +rx_frscfg: + /* Check if config is for SDP link */ + if (req->sdp_link) { + if (!hw->sdp_links) + return NIX_AF_ERR_RX_LINK_INVALID; + link = hw->cgx_links + hw->lbk_links; + goto linkcfg; + } + + /* Check if the request is from CGX mapped RVU PF */ + if (is_pf_cgxmapped(rvu, pf)) { + /* Get CGX and LMAC to which this PF is mapped and find link */ + rvu_get_cgx_lmac_id(rvu->pf2cgxlmac_map[pf], &cgx, &lmac); + link = (cgx * hw->lmac_per_cgx) + lmac; + } else if (pf == 0) { + /* For VFs of PF0 ingress is LBK port, so config LBK link */ + link = hw->cgx_links; + } + + if (link < 0) + return NIX_AF_ERR_RX_LINK_INVALID; + + nix_find_link_frs(rvu, req, pcifunc); + +linkcfg: + cfg = rvu_read64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link)); + cfg = (cfg & ~(0xFFFFULL << 16)) | ((u64)req->maxlen << 16); + if (req->update_minlen) + cfg = (cfg & ~0xFFFFULL) | req->minlen; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), cfg); + + if (req->sdp_link || pf == 0) + return 0; + + /* Update transmit credits for CGX links */ + lmac_fifo_len = + CGX_FIFO_LEN / cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + cfg = rvu_read64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link)); + cfg &= ~(0xFFFFFULL << 12); + cfg |= ((lmac_fifo_len - req->maxlen) / 16) << 12; + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_NORM_CREDIT(link), cfg); + rvu_write64(rvu, blkaddr, NIX_AF_TX_LINKX_EXPR_CREDIT(link), cfg); + + return 0; +} + +int 
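/* (Link numbering used by rvu_mbox_handler_nix_set_hw_frs() above, as
 * read off this hunk: CGX links come first (link = cgx * lmac_per_cgx
 * + lmac), then the LBK links, then SDP at hw->cgx_links +
 * hw->lbk_links. With, say, 3 CGXs x 4 LMACs and one LBK link:
 * CGX2/LMAC1 -> link 9, LBK -> link 12, SDP -> link 13.)
 */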
rvu_mbox_handler_nix_rxvlan_alloc(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam_alloc_entry_req alloc_req = { }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { }; + struct npc_mcam_free_entry_req free_req = { }; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, nixlf, err; + struct rvu_pfvf *pfvf; + + /* LBK VFs do not have separate MCAM UCAST entry hence + * skip allocating rxvlan for them + */ + if (is_afvf(pcifunc)) + return 0; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + if (pfvf->rxvlan) + return 0; + + /* alloc new mcam entry */ + alloc_req.hdr.pcifunc = pcifunc; + alloc_req.count = 1; + + err = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (err) + return err; + + /* update entry to enable rxvlan offload */ + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (blkaddr < 0) { + err = NIX_AF_ERR_AF_LF_INVALID; + goto free_entry; + } + + nixlf = rvu_get_lf(rvu, &rvu->hw->block[blkaddr], pcifunc, 0); + if (nixlf < 0) { + err = NIX_AF_ERR_AF_LF_INVALID; + goto free_entry; + } + + pfvf->rxvlan_index = alloc_rsp.entry_list[0]; + /* all it means is that rxvlan_index is valid */ + pfvf->rxvlan = true; + + err = rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); + if (err) + goto free_entry; + + return 0; +free_entry: + free_req.hdr.pcifunc = pcifunc; + free_req.entry = alloc_rsp.entry_list[0]; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, rsp); + pfvf->rxvlan = false; + return err; +} + +int rvu_mbox_handler_nix_set_rx_cfg(struct rvu *rvu, struct nix_rx_cfg *req, + struct msg_rsp *rsp) +{ + struct rvu_hwinfo *hw = rvu->hw; + u16 pcifunc = req->hdr.pcifunc; + struct rvu_block *block; + struct rvu_pfvf *pfvf; + int nixlf, blkaddr; + u64 cfg; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + block = &hw->block[blkaddr]; + nixlf = rvu_get_lf(rvu, block, pcifunc, 0); + if (nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + cfg = rvu_read64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf)); + /* Set the interface configuration */ + if (req->len_verify & BIT(0)) + cfg |= BIT_ULL(41); + else + cfg &= ~BIT_ULL(41); + + if (req->len_verify & BIT(1)) + cfg |= BIT_ULL(40); + else + cfg &= ~BIT_ULL(40); + + if (req->csum_verify & BIT(0)) + cfg |= BIT_ULL(37); + else + cfg &= ~BIT_ULL(37); + + rvu_write64(rvu, blkaddr, NIX_AF_LFX_RX_CFG(nixlf), cfg); + return 0; } +static void nix_link_config(struct rvu *rvu, int blkaddr) +{ + struct rvu_hwinfo *hw = rvu->hw; + int cgx, lmac_cnt, slink, link; + u64 tx_credits; + + /* Set default min/max packet lengths allowed on NIX Rx links. + * + * With HW reset minlen value of 60byte, HW will treat ARP pkts + * as undersize and report them to SW as error pkts, hence + * setting it to 40 bytes. + */ + for (link = 0; link < (hw->cgx_links + hw->lbk_links); link++) { + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + NIC_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + } + + if (hw->sdp_links) { + link = hw->cgx_links + hw->lbk_links; + rvu_write64(rvu, blkaddr, NIX_AF_RX_LINKX_CFG(link), + SDP_HW_MAX_FRS << 16 | NIC_HW_MIN_FRS); + } + + /* Set credits for Tx links assuming max packet length allowed. + * This will be reconfigured based on MTU set for PF/VF. 
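 *
 * Worked example, taking CGX_FIFO_LEN as 64 KB and NIC_HW_MAX_FRS as
 * 9212 (neither value is shown in this diff): with 4 LMACs sharing a
 * CGX, tx_credits = ((65536 / 4) - 9212) / 16 = 448 sixteen-byte
 * credit units per link; rvu_mbox_handler_nix_set_hw_frs() re-derives
 * this when a PF/VF later raises its MTU.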
+ */ + for (cgx = 0; cgx < hw->cgx; cgx++) { + lmac_cnt = cgx_get_lmac_cnt(rvu_cgx_pdata(cgx, rvu)); + tx_credits = ((CGX_FIFO_LEN / lmac_cnt) - NIC_HW_MAX_FRS) / 16; + /* Enable credits and set credit pkt count to max allowed */ + tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + slink = cgx * hw->lmac_per_cgx; + for (link = slink; link < (slink + lmac_cnt); link++) { + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link), + tx_credits); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_EXPR_CREDIT(link), + tx_credits); + } + } + + /* Set Tx credits for LBK link */ + slink = hw->cgx_links; + for (link = slink; link < (slink + hw->lbk_links); link++) { + tx_credits = 1000; /* 10 * max LBK datarate = 10 * 100Gbps */ + /* Enable credits and set credit pkt count to max allowed */ + tx_credits = (tx_credits << 12) | (0x1FF << 2) | BIT_ULL(1); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_NORM_CREDIT(link), tx_credits); + rvu_write64(rvu, blkaddr, + NIX_AF_TX_LINKX_EXPR_CREDIT(link), tx_credits); + } +} + static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) { int idx, err; @@ -1796,8 +2628,10 @@ static int nix_calibrate_x2p(struct rvu *rvu, int blkaddr) status = rvu_read64(rvu, blkaddr, NIX_AF_STATUS); /* Check if CGX devices are ready */ - for (idx = 0; idx < cgx_get_cgx_cnt(); idx++) { - if (status & (BIT_ULL(16 + idx))) + for (idx = 0; idx < rvu->cgx_cnt_max; idx++) { + /* Skip when cgx port is not available */ + if (!rvu_cgx_pdata(idx, rvu) || + (status & (BIT_ULL(16 + idx)))) continue; dev_err(rvu->dev, "CGX%d didn't respond to NIX X2P calibration\n", idx); @@ -1830,10 +2664,10 @@ static int nix_aq_init(struct rvu *rvu, struct rvu_block *block) /* Set admin queue endianness */ cfg = rvu_read64(rvu, block->addr, NIX_AF_CFG); #ifdef __BIG_ENDIAN - cfg |= BIT_ULL(1); + cfg |= BIT_ULL(8); rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); #else - cfg &= ~BIT_ULL(1); + cfg &= ~BIT_ULL(8); rvu_write64(rvu, block->addr, NIX_AF_CFG, cfg); #endif @@ -1870,6 +2704,14 @@ int rvu_nix_init(struct rvu *rvu) return 0; block = &hw->block[blkaddr]; + /* As per a HW errata in 9xxx A0 silicon, NIX may corrupt + * internal state when conditional clocks are turned off. + * Hence enable them. + */ + if (is_rvu_9xxx_A0(rvu)) + rvu_write64(rvu, blkaddr, NIX_AF_CFG, + rvu_read64(rvu, blkaddr, NIX_AF_CFG) | 0x5EULL); + /* Calibrate X2P bus to check if CGX/LBK links are fine */ err = nix_calibrate_x2p(rvu, blkaddr); if (err) @@ -1891,9 +2733,6 @@ int rvu_nix_init(struct rvu *rvu) /* Restore CINT timer delay to HW reset values */ rvu_write64(rvu, blkaddr, NIX_AF_CINT_DELAY, 0x0ULL); - /* Configure segmentation offload formats */ - nix_setup_lso(rvu, blkaddr); - if (blkaddr == BLKADDR_NIX0) { hw->nix0 = devm_kzalloc(rvu->dev, sizeof(struct nix_hw), GFP_KERNEL); @@ -1904,24 +2743,51 @@ int rvu_nix_init(struct rvu *rvu) if (err) return err; + err = nix_af_mark_format_setup(rvu, hw->nix0, blkaddr); + if (err) + return err; + err = nix_setup_mcast(rvu, hw->nix0, blkaddr); if (err) return err; - /* Config Outer L2, IP, TCP and UDP's NPC layer info. + /* Configure segmentation offload formats */ + nix_setup_lso(rvu, hw->nix0, blkaddr); + + /* Config Outer/Inner L2, IP, TCP, UDP and SCTP NPC layer info. * This helps HW protocol checker to identify headers * and validate length and checksums. 
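 *
 * Each NIX_AF_RX_DEF_* value written below packs
 * (layer_id << 8) | (layer_type << 4) | ltype_mask, so e.g. the OIP4
 * write says "outer IPv4 sits wherever NPC tagged LID_LC/LT_LC_IP,
 * match the ltype exactly (mask 0x0F)" -- a reading of this hunk, not
 * of the HRM.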
*/ rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OL2, (NPC_LID_LA << 8) | (NPC_LT_LA_ETHER << 4) | 0x0F); - rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, - (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F); - rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, - (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F); rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP4, (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP4, + (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OIP6, + (NPC_LID_LC << 8) | (NPC_LT_LC_IP6 << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IIP6, + (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP6 << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OTCP, + (NPC_LID_LD << 8) | (NPC_LT_LD_TCP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ITCP, + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_TCP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OUDP, + (NPC_LID_LD << 8) | (NPC_LT_LD_UDP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_IUDP, + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_UDP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_OSCTP, + (NPC_LID_LD << 8) | (NPC_LT_LD_SCTP << 4) | 0x0F); + rvu_write64(rvu, blkaddr, NIX_AF_RX_DEF_ISCTP, + (NPC_LID_LG << 8) | (NPC_LT_LG_TU_SCTP << 4) | + 0x0F); + + err = nix_rx_flowkey_alg_cfg(rvu, blkaddr); + if (err) + return err; - nix_rx_flowkey_alg_cfg(rvu, blkaddr); + /* Initialize CGX/LBK/SDP link credits, min/max pkt lengths */ + nix_link_config(rvu, blkaddr); } return 0; } @@ -1955,5 +2821,139 @@ void rvu_nix_freemem(struct rvu *rvu) mcast = &nix_hw->mcast; qmem_free(rvu->dev, mcast->mce_ctx); qmem_free(rvu->dev, mcast->mcast_buf); + mutex_destroy(&mcast->mce_lock); + } +} + +static int nix_get_nixlf(struct rvu *rvu, u16 pcifunc, int *nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct rvu_hwinfo *hw = rvu->hw; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + *nixlf = rvu_get_lf(rvu, &hw->block[blkaddr], pcifunc, 0); + if (*nixlf < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + return 0; +} + +int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf); + if (err) + return err; + + rvu_npc_enable_default_entries(rvu, pcifunc, nixlf); + return 0; +} + +int rvu_mbox_handler_nix_lf_stop_rx(struct rvu *rvu, struct msg_req *req, + struct msg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + int nixlf, err; + + err = nix_get_nixlf(rvu, pcifunc, &nixlf); + if (err) + return err; + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); + return 0; +} + +void rvu_nix_lf_teardown(struct rvu *rvu, u16 pcifunc, int blkaddr, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct hwctx_disable_req ctx_req; + int err; + + ctx_req.hdr.pcifunc = pcifunc; + + /* Cleanup NPC MCAM entries, free Tx scheduler queues being used */ + nix_interface_deinit(rvu, pcifunc, nixlf); + nix_rx_sync(rvu, blkaddr); + nix_txschq_free(rvu, pcifunc); + + if (pfvf->sq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_SQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "SQ ctx disable failed\n"); + } + + if (pfvf->rq_ctx) { + ctx_req.ctype = NIX_AQ_CTYPE_RQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "RQ ctx disable failed\n"); + } + + if (pfvf->cq_ctx) { + 
ctx_req.ctype = NIX_AQ_CTYPE_CQ; + err = nix_lf_hwctx_disable(rvu, &ctx_req); + if (err) + dev_err(rvu->dev, "CQ ctx disable failed\n"); } + + nix_ctx_free(rvu, pfvf); +} + +int rvu_mbox_handler_nix_lso_format_cfg(struct rvu *rvu, + struct nix_lso_format_cfg *req, + struct nix_lso_format_cfg_rsp *rsp) +{ + u16 pcifunc = req->hdr.pcifunc; + struct nix_hw *nix_hw; + struct rvu_pfvf *pfvf; + int blkaddr, idx, f; + u64 reg; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, pcifunc); + if (!pfvf->nixlf || blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + nix_hw = get_nix_hw(rvu->hw, blkaddr); + if (!nix_hw) + return -EINVAL; + + /* Find existing matching LSO format, if any */ + for (idx = 0; idx < nix_hw->lso.in_use; idx++) { + for (f = 0; f < NIX_LSO_FIELD_MAX; f++) { + reg = rvu_read64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(idx, f)); + if (req->fields[f] != (reg & req->field_mask)) + break; + } + + if (f == NIX_LSO_FIELD_MAX) + break; + } + + if (idx < nix_hw->lso.in_use) { + /* Match found */ + rsp->lso_format_idx = idx; + return 0; + } + + if (nix_hw->lso.in_use == nix_hw->lso.total) + return NIX_AF_ERR_LSO_CFG_FAIL; + + rsp->lso_format_idx = nix_hw->lso.in_use++; + + for (f = 0; f < NIX_LSO_FIELD_MAX; f++) + rvu_write64(rvu, blkaddr, + NIX_AF_LSO_FORMATX_FIELDX(rsp->lso_format_idx, f), + req->fields[f]); + + return 0; } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c index 7531fdc54fa1..c0e165dfc403 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npa.c @@ -241,14 +241,14 @@ static int npa_lf_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req) return err; } -int rvu_mbox_handler_NPA_AQ_ENQ(struct rvu *rvu, +int rvu_mbox_handler_npa_aq_enq(struct rvu *rvu, struct npa_aq_enq_req *req, struct npa_aq_enq_rsp *rsp) { return rvu_npa_aq_enq_inst(rvu, req, rsp); } -int rvu_mbox_handler_NPA_HWCTX_DISABLE(struct rvu *rvu, +int rvu_mbox_handler_npa_hwctx_disable(struct rvu *rvu, struct hwctx_disable_req *req, struct msg_rsp *rsp) { @@ -273,7 +273,7 @@ static void npa_ctx_free(struct rvu *rvu, struct rvu_pfvf *pfvf) pfvf->npa_qints_ctx = NULL; } -int rvu_mbox_handler_NPA_LF_ALLOC(struct rvu *rvu, +int rvu_mbox_handler_npa_lf_alloc(struct rvu *rvu, struct npa_lf_alloc_req *req, struct npa_lf_alloc_rsp *rsp) { @@ -372,7 +372,7 @@ exit: return rc; } -int rvu_mbox_handler_NPA_LF_FREE(struct rvu *rvu, struct msg_req *req, +int rvu_mbox_handler_npa_lf_free(struct rvu *rvu, struct msg_req *req, struct msg_rsp *rsp) { struct rvu_hwinfo *hw = rvu->hw; @@ -470,3 +470,20 @@ void rvu_npa_freemem(struct rvu *rvu) block = &hw->block[blkaddr]; rvu_aq_free(rvu, block->aq); } + +void rvu_npa_lf_teardown(struct rvu *rvu, u16 pcifunc, int npalf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct hwctx_disable_req ctx_req; + + /* Disable all pools */ + ctx_req.hdr.pcifunc = pcifunc; + ctx_req.ctype = NPA_AQ_CTYPE_POOL; + npa_lf_hwctx_disable(rvu, &ctx_req); + + /* Disable all auras */ + ctx_req.ctype = NPA_AQ_CTYPE_AURA; + npa_lf_hwctx_disable(rvu, &ctx_req); + + npa_ctx_free(rvu, pfvf); +} diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 23ff47f7efc5..15f70273e29c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -8,6 +8,7 @@ * published by the Free Software Foundation. 
*/ +#include <linux/bitfield.h> #include <linux/module.h> #include <linux/pci.h> @@ -15,6 +16,7 @@ #include "rvu_reg.h" #include "rvu.h" #include "npc.h" +#include "cgx.h" #include "npc_profile.h" #define RSVD_MCAM_ENTRIES_PER_PF 2 /* Bcast & Promisc */ @@ -26,13 +28,10 @@ #define NPC_PARSE_RESULT_DMAC_OFFSET 8 -struct mcam_entry { -#define NPC_MAX_KWS_IN_KEY 7 /* Number of keywords in max keywidth */ - u64 kw[NPC_MAX_KWS_IN_KEY]; - u64 kw_mask[NPC_MAX_KWS_IN_KEY]; - u64 action; - u64 vtag_action; -}; +static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc); +static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, + u16 pcifunc); void rvu_npc_set_pkind(struct rvu *rvu, int pkind, struct rvu_pfvf *pfvf) { @@ -256,6 +255,46 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, npc_enable_mcam_entry(rvu, mcam, blkaddr, actindex, false); } +static void npc_copy_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 src, u16 dest) +{ + int dbank = npc_get_bank(mcam, dest); + int sbank = npc_get_bank(mcam, src); + u64 cfg, sreg, dreg; + int bank, i; + + src &= (mcam->banksize - 1); + dest &= (mcam->banksize - 1); + + /* Copy INTF's, W0's, W1's CAM0 and CAM1 configuration */ + for (bank = 0; bank < mcam->banks_per_entry; bank++) { + sreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(src, sbank + bank, 0); + dreg = NPC_AF_MCAMEX_BANKX_CAMX_INTF(dest, dbank + bank, 0); + for (i = 0; i < 6; i++) { + cfg = rvu_read64(rvu, blkaddr, sreg + (i * 8)); + rvu_write64(rvu, blkaddr, dreg + (i * 8), cfg); + } + } + + /* Copy action */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(dest, dbank), cfg); + + /* Copy TAG action */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_TAG_ACT(dest, dbank), cfg); + + /* Enable or disable */ + cfg = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(src, sbank)); + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(dest, dbank), cfg); +} + static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, int blkaddr, int index) { @@ -269,12 +308,17 @@ static u64 npc_get_mcam_action(struct rvu *rvu, struct npc_mcam *mcam, void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, u8 *mac_addr) { + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); struct npc_mcam *mcam = &rvu->hw->mcam; struct mcam_entry entry = { {0} }; struct nix_rx_action action; int blkaddr, index, kwi; u64 mac = 0; + /* AF's VFs work in promiscuous mode */ + if (is_afvf(pcifunc)) + return; + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; @@ -308,22 +352,33 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, entry.action = *(u64 *)&action; npc_config_mcam_entry(rvu, mcam, blkaddr, index, NIX_INTF_RX, &entry, true); + + /* add VLAN matching, setup action and save entry back for later */ + entry.kw[0] |= (NPC_LT_LB_STAG | NPC_LT_LB_CTAG) << 20; + entry.kw_mask[0] |= (NPC_LT_LB_STAG & NPC_LT_LB_CTAG) << 20; + + entry.vtag_action = VTAG0_VALID_BIT | + FIELD_PREP(VTAG0_TYPE_MASK, 0) | + FIELD_PREP(VTAG0_LID_MASK, NPC_LID_LA) | + FIELD_PREP(VTAG0_RELPTR_MASK, 12); + + memcpy(&pfvf->entry, &entry, sizeof(entry)); } void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf, u64 chan, bool allmulti) { struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, ucast_idx, 
index, kwi; struct mcam_entry entry = { {0} }; - struct nix_rx_action action; - int blkaddr, index, kwi; + struct nix_rx_action action = { }; - blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); - if (blkaddr < 0) + /* Only PF or AF VF can add a promiscuous entry */ + if ((pcifunc & RVU_PFVF_FUNC_MASK) && !is_afvf(pcifunc)) return; - /* Only PF or AF VF can add a promiscuous entry */ - if (pcifunc & RVU_PFVF_FUNC_MASK) + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) return; index = npc_get_nixlf_mcam_index(mcam, pcifunc, @@ -338,16 +393,29 @@ void rvu_npc_install_promisc_entry(struct rvu *rvu, u16 pcifunc, entry.kw_mask[kwi] = BIT_ULL(40); } - *(u64 *)&action = 0x00; - action.op = NIX_RX_ACTIONOP_UCAST; - action.pf_func = pcifunc; + ucast_idx = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_UCAST_ENTRY); + + /* If the corresponding PF's ucast action is RSS, + * use the same action for promisc also + */ + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, ucast_idx)) + *(u64 *)&action = npc_get_mcam_action(rvu, mcam, + blkaddr, ucast_idx); + + if (action.op != NIX_RX_ACTIONOP_RSS) { + *(u64 *)&action = 0x00; + action.op = NIX_RX_ACTIONOP_UCAST; + action.pf_func = pcifunc; + } entry.action = *(u64 *)&action; npc_config_mcam_entry(rvu, mcam, blkaddr, index, NIX_INTF_RX, &entry, true); } -void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) +static void npc_enadis_promisc_entry(struct rvu *rvu, u16 pcifunc, + int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; int blkaddr, index; @@ -362,7 +430,17 @@ void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_PROMISC_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); +} + +void rvu_npc_disable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_promisc_entry(rvu, pcifunc, nixlf, false); +} + +void rvu_npc_enable_promisc_entry(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_promisc_entry(rvu, pcifunc, nixlf, true); } void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, @@ -390,9 +468,28 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_BCAST_ENTRY); - /* Check for L2B bit and LMAC channel */ - entry.kw[0] = BIT_ULL(25) | chan; - entry.kw_mask[0] = BIT_ULL(25) | 0xFFFULL; + /* Check for L2B bit and LMAC channel. + * NOTE: The MKEX default profile (a reduced version intended to + * accommodate more capability but ignoring a few bits) is a stop-gap + * approach. + * Since we care for L2B, which per HRM NPC_PARSE_KEX_S sits at + * BIT_POS[25], it is moved to BIT_POS[13], ignoring ERRCODE and + * ERRLEV; otherwise we'd lose out on capability features needed for + * CoS (from an ODP PoV), e.g. VLAN, DSCP. + * + * Reduced layout of the MKEX default profile - + * it includes the following (i.e. CHAN, L2/3{B/M}, LA, LB, LC, LD): + * + * BIT_POS[31:28] : LD + * BIT_POS[27:24] : LC + * BIT_POS[23:20] : LB + * BIT_POS[19:16] : LA + * BIT_POS[15:12] : L3B, L3M, L2B, L2M + * BIT_POS[11:00] : CHAN + * + */ + entry.kw[0] = BIT_ULL(13) | chan; + entry.kw_mask[0] = BIT_ULL(13) | 0xFFFULL; *(u64 *)&action = 0x00; #ifdef MCAST_MCE
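For reference, the key composition this reduced layout implies can be shown as a tiny standalone sketch. This is an illustration, not part of the patch: mkex_kw0() is a hypothetical helper, and only the bit positions (CHAN in [11:0], L2B at bit 13, the LB ltype nibble in [23:20]) come from the layout comment and the entry.kw[0]/kw_mask[0] assignments above.

#include <stdint.h>
#include <stdio.h>

/* Compose MCAM KW0 per the reduced MKEX layout documented above. */
static uint64_t mkex_kw0(uint16_t chan, int l2b, uint8_t lb_ltype)
{
	uint64_t kw0 = chan & 0xFFFULL;		/* CHAN: BIT_POS[11:0] */

	if (l2b)
		kw0 |= 1ULL << 13;		/* L2B flag: BIT_POS[13] */
	kw0 |= (uint64_t)(lb_ltype & 0xF) << 20; /* LB ltype: BIT_POS[23:20] */
	return kw0;
}

int main(void)
{
	/* The bcast match above: L2B set, any LB ltype, channel 0x10;
	 * the mask covers only the L2B bit plus the 12 channel bits.
	 */
	printf("kw0=0x%llx mask=0x%llx\n",
	       (unsigned long long)mkex_kw0(0x10, 1, 0),
	       (unsigned long long)((1ULL << 13) | 0xFFFULL));
	return 0;
}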
@@ -454,51 +551,110 @@ void rvu_npc_update_flowkey_alg_idx(struct rvu *rvu, u16 pcifunc, int nixlf, rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_ACTION(index, bank), *(u64 *)&action); + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_PROMISC_ENTRY); + + /* If PF's promiscuous entry is enabled, + * set the RSS action for that entry as well + */ + if (is_mcam_entry_enabled(rvu, mcam, blkaddr, index)) { + bank = npc_get_bank(mcam, index); + index &= (mcam->banksize - 1); + + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index, bank), + *(u64 *)&action); + } + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); } -void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +static void npc_enadis_default_entries(struct rvu *rvu, u16 pcifunc, + int nixlf, bool enable) { struct npc_mcam *mcam = &rvu->hw->mcam; struct nix_rx_action action; - int blkaddr, index, bank; + int index, bank, blkaddr; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) return; - /* Disable ucast MCAM match entry of this PF/VF */ + /* Ucast MCAM match entry of this PF/VF */ index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, NIXLF_UCAST_ENTRY); - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, enable); - /* For PF, disable promisc and bcast MCAM match entries */ - if (!(pcifunc & RVU_PFVF_FUNC_MASK)) { - index = npc_get_nixlf_mcam_index(mcam, pcifunc, - nixlf, NIXLF_BCAST_ENTRY); - /* For bcast, disable only if it's action is not - * packet replication, incase if action is replication - * then this PF's nixlf is removed from bcast replication - * list. - */ - bank = npc_get_bank(mcam, index); - index &= (mcam->banksize - 1); - *(u64 *)&action = rvu_read64(rvu, blkaddr, - NPC_AF_MCAMEX_BANKX_ACTION(index, bank)); - if (action.op != NIX_RX_ACTIONOP_MCAST) - npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + /* For PF, ena/dis promisc and bcast MCAM match entries */ + if (pcifunc & RVU_PFVF_FUNC_MASK) + return; + /* For bcast, enable/disable only if its action is not + * packet replication; in case the action is replication, + * then this PF's nixlf is removed from the bcast replication + * list.
+ */ + index = npc_get_nixlf_mcam_index(mcam, pcifunc, + nixlf, NIXLF_BCAST_ENTRY); + bank = npc_get_bank(mcam, index); + *(u64 *)&action = rvu_read64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_ACTION(index & (mcam->banksize - 1), bank)); + if (action.op != NIX_RX_ACTIONOP_MCAST) + npc_enable_mcam_entry(rvu, mcam, + blkaddr, index, enable); + if (enable) + rvu_npc_enable_promisc_entry(rvu, pcifunc, nixlf); + else rvu_npc_disable_promisc_entry(rvu, pcifunc, nixlf); - } + + rvu_npc_update_rxvlan(rvu, pcifunc, nixlf); +} + +void rvu_npc_disable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_default_entries(rvu, pcifunc, nixlf, false); +} + +void rvu_npc_enable_default_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + npc_enadis_default_entries(rvu, pcifunc, nixlf, true); +} + +void rvu_npc_disable_mcam_entries(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return; + + mutex_lock(&mcam->lock); + + /* Disable and free all MCAM entries mapped to this 'pcifunc' */ + npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); + + /* Free all MCAM counters mapped to this 'pcifunc' */ + npc_mcam_free_all_counters(rvu, mcam, pcifunc); + + mutex_unlock(&mcam->lock); + + rvu_npc_disable_default_entries(rvu, pcifunc, nixlf); } -#define LDATA_EXTRACT_CONFIG(intf, lid, ltype, ld, cfg) \ +#define SET_KEX_LD(intf, lid, ltype, ld, cfg) \ rvu_write64(rvu, blkaddr, \ NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, ltype, ld), cfg) -#define LDATA_FLAGS_CONFIG(intf, ld, flags, cfg) \ +#define SET_KEX_LDFLAGS(intf, ld, flags, cfg) \ rvu_write64(rvu, blkaddr, \ NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, flags), cfg) +#define KEX_LD_CFG(bytesm1, hdr_ofs, ena, flags_ena, key_ofs) \ + (((bytesm1) << 16) | ((hdr_ofs) << 8) | ((ena) << 7) | \ + ((flags_ena) << 6) | ((key_ofs) & 0x3F)) + static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr) { struct npc_mcam *mcam = &rvu->hw->mcam; @@ -514,28 +670,171 @@ static void npc_config_ldata_extract(struct rvu *rvu, int blkaddr) */ for (lid = 0; lid < lid_count; lid++) { for (ltype = 0; ltype < 16; ltype++) { - LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 0, 0ULL); - LDATA_EXTRACT_CONFIG(NIX_INTF_RX, lid, ltype, 1, 0ULL); - LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 0, 0ULL); - LDATA_EXTRACT_CONFIG(NIX_INTF_TX, lid, ltype, 1, 0ULL); - - LDATA_FLAGS_CONFIG(NIX_INTF_RX, 0, ltype, 0ULL); - LDATA_FLAGS_CONFIG(NIX_INTF_RX, 1, ltype, 0ULL); - LDATA_FLAGS_CONFIG(NIX_INTF_TX, 0, ltype, 0ULL); - LDATA_FLAGS_CONFIG(NIX_INTF_TX, 1, ltype, 0ULL); + SET_KEX_LD(NIX_INTF_RX, lid, ltype, 0, 0ULL); + SET_KEX_LD(NIX_INTF_RX, lid, ltype, 1, 0ULL); + SET_KEX_LD(NIX_INTF_TX, lid, ltype, 0, 0ULL); + SET_KEX_LD(NIX_INTF_TX, lid, ltype, 1, 0ULL); + + SET_KEX_LDFLAGS(NIX_INTF_RX, 0, ltype, 0ULL); + SET_KEX_LDFLAGS(NIX_INTF_RX, 1, ltype, 0ULL); + SET_KEX_LDFLAGS(NIX_INTF_TX, 0, ltype, 0ULL); + SET_KEX_LDFLAGS(NIX_INTF_TX, 1, ltype, 0ULL); } } - /* If we plan to extract Outer IPv4 tuple for TCP/UDP pkts - * then 112bit key is not sufficient - */ if (mcam->keysize != NPC_MCAM_KEY_X2) return; - /* Start placing extracted data/flags from 64bit onwards, for now */ - /* Extract DMAC from the packet */ - cfg = (0x05 << 16) | BIT_ULL(7) | NPC_PARSE_RESULT_DMAC_OFFSET; - LDATA_EXTRACT_CONFIG(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg); + /* Default MCAM KEX profile */ + /* Layer A: Ethernet: */ + + /* DMAC: 6 bytes, KW1[47:0] */ + cfg = KEX_LD_CFG(0x05, 
0x0, 0x1, 0x0, NPC_PARSE_RESULT_DMAC_OFFSET); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 0, cfg); + + /* Ethertype: 2 bytes, KW0[47:32] */ + cfg = KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LA, NPC_LT_LA_ETHER, 1, cfg); + + /* Layer B: Single VLAN (CTAG) */ + /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ + cfg = KEX_LD_CFG(0x03, 0x0, 0x1, 0x0, 0x4); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_CTAG, 0, cfg); + + /* Layer B: Stacked VLAN (STAG|QinQ) */ + /* CTAG VLAN[2..3] + Ethertype, 4 bytes, KW0[63:32] */ + cfg = KEX_LD_CFG(0x03, 0x4, 0x1, 0x0, 0x4); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_STAG, 0, cfg); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LB, NPC_LT_LB_QINQ, 0, cfg); + + /* Layer C: IPv4 */ + /* SIP+DIP: 8 bytes, KW2[63:0] */ + cfg = KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 0, cfg); + /* TOS: 1 byte, KW1[63:56] */ + cfg = KEX_LD_CFG(0x0, 0x1, 0x1, 0x0, 0xf); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LC, NPC_LT_LC_IP, 1, cfg); + + /* Layer D: UDP */ + /* SPORT: 2 bytes, KW3[15:0] */ + cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 0, cfg); + /* DPORT: 2 bytes, KW3[31:16] */ + cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_UDP, 1, cfg); + + /* Layer D: TCP */ + /* SPORT: 2 bytes, KW3[15:0] */ + cfg = KEX_LD_CFG(0x1, 0x0, 0x1, 0x0, 0x18); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 0, cfg); + /* DPORT: 2 bytes, KW3[31:16] */ + cfg = KEX_LD_CFG(0x1, 0x2, 0x1, 0x0, 0x1a); + SET_KEX_LD(NIX_INTF_RX, NPC_LID_LD, NPC_LT_LD_TCP, 1, cfg); +}
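The magic numbers fed to KEX_LD_CFG() above decode mechanically from the macro's field packing, defined earlier in this file. A hypothetical standalone decoder, illustration only — the 7-bit width assumed for bytesm1 is this sketch's own choice; the rest follows the macro:

#include <stdio.h>

/* Unpack a KEX_LD_CFG() value: bytesm1 at [22:16], hdr_ofs at [15:8],
 * ena at bit 7, flags_ena at bit 6, key_ofs at [5:0].
 */
static void kex_ld_decode(unsigned int cfg)
{
	printf("extract %u byte(s) at header offset 0x%x into key offset 0x%x (ena=%u)\n",
	       ((cfg >> 16) & 0x7f) + 1,
	       (cfg >> 8) & 0xff,
	       cfg & 0x3f,
	       (cfg >> 7) & 1);
}

int main(void)
{
	/* Ethertype rule above: KEX_LD_CFG(0x01, 0xc, 0x1, 0x0, 0x4)
	 * -> 2 bytes at header offset 12 into key byte 4, i.e. KW0[47:32].
	 */
	kex_ld_decode((0x01 << 16) | (0xc << 8) | (1 << 7) | 0x4);
	/* IPv4 SIP+DIP rule: KEX_LD_CFG(0x07, 0xc, 0x1, 0x0, 0x10)
	 * -> 8 bytes at header offset 12 into key byte 0x10, i.e. KW2[63:0].
	 */
	kex_ld_decode((0x07 << 16) | (0xc << 8) | (1 << 7) | 0x10);
	return 0;
}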
+ +static void npc_program_mkex_profile(struct rvu *rvu, int blkaddr, + struct npc_mcam_kex *mkex) +{ + int lid, lt, ld, fl; + + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), + mkex->keyx_cfg[NIX_INTF_RX]); + rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), + mkex->keyx_cfg[NIX_INTF_TX]); + + for (ld = 0; ld < NPC_MAX_LD; ld++) + rvu_write64(rvu, blkaddr, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld), + mkex->kex_ld_flags[ld]); + + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + SET_KEX_LD(NIX_INTF_RX, lid, lt, ld, + mkex->intf_lid_lt_ld[NIX_INTF_RX] + [lid][lt][ld]); + + SET_KEX_LD(NIX_INTF_TX, lid, lt, ld, + mkex->intf_lid_lt_ld[NIX_INTF_TX] + [lid][lt][ld]); + } + } + } + + for (ld = 0; ld < NPC_MAX_LD; ld++) { + for (fl = 0; fl < NPC_MAX_LFL; fl++) { + SET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl, + mkex->intf_ld_flags[NIX_INTF_RX] + [ld][fl]); + + SET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl, + mkex->intf_ld_flags[NIX_INTF_TX] + [ld][fl]); + } + } +} + +/* strtoull of "mkexprof" with base:36 */ +#define MKEX_SIGN 0x19bbfdbd15f +#define MKEX_END_SIGN 0xdeadbeef + +static void npc_load_mkex_profile(struct rvu *rvu, int blkaddr) +{ + const char *mkex_profile = rvu->mkex_pfl_name; + struct device *dev = &rvu->pdev->dev; + void __iomem *mkex_prfl_addr = NULL; + struct npc_mcam_kex *mcam_kex; + u64 prfl_addr; + u64 prfl_sz; + + /* If the user has not selected any mkex profile */ + if (!strncmp(mkex_profile, "default", MKEX_NAME_LEN)) + goto load_default; + + if (cgx_get_mkex_prfl_info(&prfl_addr, &prfl_sz)) + goto load_default; + + if (!prfl_addr || !prfl_sz) + goto load_default; + + mkex_prfl_addr = ioremap_wc(prfl_addr, prfl_sz); + if (!mkex_prfl_addr) + goto load_default; + + mcam_kex = (struct npc_mcam_kex *)mkex_prfl_addr; + + while (((s64)prfl_sz > 0) && (mcam_kex->mkex_sign != MKEX_END_SIGN)) { + /* Compare with the mkex mod_param name string */ + if (mcam_kex->mkex_sign == MKEX_SIGN && + !strncmp(mcam_kex->name, mkex_profile, MKEX_NAME_LEN)) { + /* Due to an errata (35786) in A0 pass silicon, + * parse nibble enable configuration has to be + * identical for both Rx and Tx interfaces. + */ + if (is_rvu_9xxx_A0(rvu) && + mcam_kex->keyx_cfg[NIX_INTF_RX] != + mcam_kex->keyx_cfg[NIX_INTF_TX]) + goto load_default; + + /* Program the selected mkex profile */ + npc_program_mkex_profile(rvu, blkaddr, mcam_kex); + + goto unmap; + } + + mcam_kex++; + prfl_sz -= sizeof(struct npc_mcam_kex); + } + dev_warn(dev, "Failed to load requested profile: %s\n", + rvu->mkex_pfl_name); + +load_default: + dev_info(rvu->dev, "Using default mkex profile\n"); + /* Config packet data and flags extraction into PARSE result */ + npc_config_ldata_extract(rvu, blkaddr); + +unmap: + if (mkex_prfl_addr) + iounmap(mkex_prfl_addr); } static void npc_config_kpuaction(struct rvu *rvu, int blkaddr, @@ -690,13 +989,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) { int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; - int rsvd; + int rsvd, err; u64 cfg; /* Get HW limits */ cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); mcam->banks = (cfg >> 44) & 0xF; mcam->banksize = (cfg >> 28) & 0xFFFF; + mcam->counters.max = (cfg >> 48) & 0xFFFF; /* Actual number of MCAM entries vary by entry size */ cfg = (rvu_read64(rvu, blkaddr, @@ -728,20 +1028,82 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) return -ENOMEM; } - mcam->entries = mcam->total_entries - rsvd; - mcam->nixlf_offset = mcam->entries; + mcam->bmap_entries = mcam->total_entries - rsvd; + mcam->nixlf_offset = mcam->bmap_entries; mcam->pf_offset = mcam->nixlf_offset + nixlf_count; - spin_lock_init(&mcam->lock); + /* Allocate bitmaps for managing MCAM entries */ + mcam->bmap = devm_kcalloc(rvu->dev, BITS_TO_LONGS(mcam->bmap_entries), + sizeof(long), GFP_KERNEL); + if (!mcam->bmap) + return -ENOMEM; + + mcam->bmap_reverse = devm_kcalloc(rvu->dev, + BITS_TO_LONGS(mcam->bmap_entries), + sizeof(long), GFP_KERNEL); + if (!mcam->bmap_reverse) + return -ENOMEM; + + mcam->bmap_fcnt = mcam->bmap_entries; + + /* Alloc memory for saving entry to RVU PFFUNC allocation mapping */ + mcam->entry2pfvf_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2pfvf_map) + return -ENOMEM; + + /* Reserve 1/8th of MCAM entries at the bottom for low priority + * allocations and another 1/8th at the top for high priority + * allocations. + */ + mcam->lprio_count = mcam->bmap_entries / 8; + if (mcam->lprio_count > BITS_PER_LONG) + mcam->lprio_count = round_down(mcam->lprio_count, + BITS_PER_LONG); + mcam->lprio_start = mcam->bmap_entries - mcam->lprio_count; + mcam->hprio_count = mcam->lprio_count; + mcam->hprio_end = mcam->hprio_count; + + /* Allocate bitmap for managing MCAM counters and memory + * for saving counter to RVU PFFUNC allocation mapping. + */ + err = rvu_alloc_bitmap(&mcam->counters); + if (err) + return err; + + mcam->cntr2pfvf_map = devm_kcalloc(rvu->dev, mcam->counters.max, + sizeof(u16), GFP_KERNEL); + if (!mcam->cntr2pfvf_map) + goto free_mem; + + /* Alloc memory for MCAM entry to counter mapping and for tracking + * counter's reference count.
+ */ + mcam->entry2cntr_map = devm_kcalloc(rvu->dev, mcam->bmap_entries, + sizeof(u16), GFP_KERNEL); + if (!mcam->entry2cntr_map) + goto free_mem; + + mcam->cntr_refcnt = devm_kcalloc(rvu->dev, mcam->counters.max, + sizeof(u16), GFP_KERNEL); + if (!mcam->cntr_refcnt) + goto free_mem; + + mutex_init(&mcam->lock); return 0; + +free_mem: + kfree(mcam->counters.bmap); + return -ENOMEM; } int rvu_npc_init(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; u64 keyz = NPC_MCAM_KEY_X2; - int blkaddr, err; + int blkaddr, entry, bank, err; + u64 cfg, nibble_ena; blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); if (blkaddr < 0) { @@ -749,6 +1111,14 @@ int rvu_npc_init(struct rvu *rvu) return -ENODEV; } + /* First disable all MCAM entries, to stop traffic towards NIXLFs */ + cfg = rvu_read64(rvu, blkaddr, NPC_AF_CONST); + for (bank = 0; bank < ((cfg >> 44) & 0xF); bank++) { + for (entry = 0; entry < ((cfg >> 28) & 0xFFFF); entry++) + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_CFG(entry, bank), 0); + } + /* Allocate resource bimap for pkind*/ pkind->rsrc.max = (rvu_read64(rvu, blkaddr, NPC_AF_CONST1) >> 12) & 0xFF; @@ -771,29 +1141,41 @@ int rvu_npc_init(struct rvu *rvu) rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_OIP4, (NPC_LID_LC << 8) | (NPC_LT_LC_IP << 4) | 0x0F); + /* Config Inner IPV4 NPC layer info */ + rvu_write64(rvu, blkaddr, NPC_AF_PCK_DEF_IIP4, + (NPC_LID_LF << 8) | (NPC_LT_LF_TU_IP << 4) | 0x0F); + /* Enable below for Rx pkts. * - Outer IPv4 header checksum validation. * - Detect outer L2 broadcast address and set NPC_RESULT_S[L2M]. + * - Inner IPv4 header checksum validation. + * - Set non zero checksum error code value */ rvu_write64(rvu, blkaddr, NPC_AF_PCK_CFG, rvu_read64(rvu, blkaddr, NPC_AF_PCK_CFG) | - BIT_ULL(6) | BIT_ULL(2)); + BIT_ULL(32) | BIT_ULL(24) | BIT_ULL(6) | + BIT_ULL(2) | BIT_ULL(1)); /* Set RX and TX side MCAM search key size. - * Also enable parse key extract nibbles suchthat except - * layer E to H, rest of the key is included for MCAM search. + * LA..LD (ltype only) + Channel */ + nibble_ena = 0x49247; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_RX), - ((keyz & 0x3) << 32) | ((1ULL << 20) - 1)); + ((keyz & 0x3) << 32) | nibble_ena); + /* Due to an errata (35786) in A0 pass silicon, parse nibble enable + * configuration has to be identical for both Rx and Tx interfaces. + */ + if (!is_rvu_9xxx_A0(rvu)) + nibble_ena = (1ULL << 19) - 1; rvu_write64(rvu, blkaddr, NPC_AF_INTFX_KEX_CFG(NIX_INTF_TX), - ((keyz & 0x3) << 32) | ((1ULL << 20) - 1)); + ((keyz & 0x3) << 32) | nibble_ena); err = npc_mcam_rsrcs_init(rvu, blkaddr); if (err) return err; - /* Config packet data and flags extraction into PARSE result */ - npc_config_ldata_extract(rvu, blkaddr); + /* Configure MKEX profile */ + npc_load_mkex_profile(rvu, blkaddr); /* Set TX miss action to UCAST_DEFAULT i.e * transmit the packet on NIX LF SQ's default channel. @@ -811,6 +1193,1020 @@ int rvu_npc_init(struct rvu *rvu) void rvu_npc_freemem(struct rvu *rvu) { struct npc_pkind *pkind = &rvu->hw->pkind; + struct npc_mcam *mcam = &rvu->hw->mcam; kfree(pkind->rsrc.bmap); + kfree(mcam->counters.bmap); + mutex_destroy(&mcam->lock); +} + +static int npc_mcam_verify_entry(struct npc_mcam *mcam, + u16 pcifunc, int entry) +{ + /* Verify if entry is valid and if it is indeed + * allocated to the requesting PFFUNC. 
+ */ + if (entry >= mcam->bmap_entries) + return NPC_MCAM_INVALID_REQ; + + if (pcifunc != mcam->entry2pfvf_map[entry]) + return NPC_MCAM_PERM_DENIED; + + return 0; +} + +static int npc_mcam_verify_counter(struct npc_mcam *mcam, + u16 pcifunc, int cntr) +{ + /* Verify if counter is valid and if it is indeed + * allocated to the requesting PFFUNC. + */ + if (cntr >= mcam->counters.max) + return NPC_MCAM_INVALID_REQ; + + if (pcifunc != mcam->cntr2pfvf_map[cntr]) + return NPC_MCAM_PERM_DENIED; + + return 0; +} + +static void npc_map_mcam_entry_and_cntr(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 entry, u16 cntr) +{ + u16 index = entry & (mcam->banksize - 1); + u16 bank = npc_get_bank(mcam, entry); + + /* Set mapping and increment counter's refcnt */ + mcam->entry2cntr_map[entry] = cntr; + mcam->cntr_refcnt[cntr]++; + /* Enable stats */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), + BIT_ULL(9) | cntr); +} + +static void npc_unmap_mcam_entry_and_cntr(struct rvu *rvu, + struct npc_mcam *mcam, + int blkaddr, u16 entry, u16 cntr) +{ + u16 index = entry & (mcam->banksize - 1); + u16 bank = npc_get_bank(mcam, entry); + + /* Remove mapping and reduce counter's refcnt */ + mcam->entry2cntr_map[entry] = NPC_MCAM_INVALID_MAP; + mcam->cntr_refcnt[cntr]--; + /* Disable stats */ + rvu_write64(rvu, blkaddr, + NPC_AF_MCAMEX_BANKX_STAT_ACT(index, bank), 0x00); +} + +/* Sets MCAM entry in bitmap as used. Update + * reverse bitmap too. Should be called with + * 'mcam->lock' held. + */ +static void npc_mcam_set_bit(struct npc_mcam *mcam, u16 index) +{ + u16 entry, rentry; + + entry = index; + rentry = mcam->bmap_entries - index - 1; + + __set_bit(entry, mcam->bmap); + __set_bit(rentry, mcam->bmap_reverse); + mcam->bmap_fcnt--; +} + +/* Sets MCAM entry in bitmap as free. Update + * reverse bitmap too. Should be called with + * 'mcam->lock' held. + */ +static void npc_mcam_clear_bit(struct npc_mcam *mcam, u16 index) +{ + u16 entry, rentry; + + entry = index; + rentry = mcam->bmap_entries - index - 1; + + __clear_bit(entry, mcam->bmap); + __clear_bit(rentry, mcam->bmap_reverse); + mcam->bmap_fcnt++; +} + +static void npc_mcam_free_all_entries(struct rvu *rvu, struct npc_mcam *mcam, + int blkaddr, u16 pcifunc) +{ + u16 index, cntr; + + /* Scan all MCAM entries and free the ones mapped to 'pcifunc' */ + for (index = 0; index < mcam->bmap_entries; index++) { + if (mcam->entry2pfvf_map[index] == pcifunc) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + /* Free the entry in bitmap */ + npc_mcam_clear_bit(mcam, index); + /* Disable the entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, index, false); + + /* Update entry2counter mapping */ + cntr = mcam->entry2cntr_map[index]; + if (cntr != NPC_MCAM_INVALID_MAP) + npc_unmap_mcam_entry_and_cntr(rvu, mcam, + blkaddr, index, + cntr); + } + } +} + +static void npc_mcam_free_all_counters(struct rvu *rvu, struct npc_mcam *mcam, + u16 pcifunc) +{ + u16 cntr; + + /* Scan all MCAM counters and free the ones mapped to 'pcifunc' */ + for (cntr = 0; cntr < mcam->counters.max; cntr++) { + if (mcam->cntr2pfvf_map[cntr] == pcifunc) { + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mcam->cntr_refcnt[cntr] = 0; + rvu_free_rsrc(&mcam->counters, cntr); + /* This API is expected to be called after freeing + * MCAM entries, which inturn will remove + * 'entry to counter' mapping. + * No need to do it again. + */ + } + } +} + +/* Find area of contiguous free entries of size 'nr'. 
+ * If not found return max contiguous free entries available. + */ +static u16 npc_mcam_find_zero_area(unsigned long *map, u16 size, u16 start, + u16 nr, u16 *max_area) +{ + u16 max_area_start = 0; + u16 index, next, end; + + *max_area = 0; + +again: + index = find_next_zero_bit(map, size, start); + if (index >= size) + return max_area_start; + + end = ((index + nr) >= size) ? size : index + nr; + next = find_next_bit(map, end, index); + if (*max_area < (next - index)) { + *max_area = next - index; + max_area_start = index; + } + + if (next < end) { + start = next + 1; + goto again; + } + + return max_area_start; +} + +/* Find number of free MCAM entries available + * within range, i.e. between 'start' and 'end'. + */ +static u16 npc_mcam_get_free_count(unsigned long *map, u16 start, u16 end) +{ + u16 index, next; + u16 fcnt = 0; + +again: + if (start >= end) + return fcnt; + + index = find_next_zero_bit(map, end, start); + if (index >= end) + return fcnt; + + next = find_next_bit(map, end, index); + if (next <= end) { + fcnt += next - index; + start = next + 1; + goto again; + } + + fcnt += end - index; + return fcnt; +} + +static void +npc_get_mcam_search_range_priority(struct npc_mcam *mcam, + struct npc_mcam_alloc_entry_req *req, + u16 *start, u16 *end, bool *reverse) +{ + u16 fcnt; + + if (req->priority == NPC_MCAM_HIGHER_PRIO) + goto hprio; + + /* For a low priority entry allocation + * - If reference entry is not in hprio zone then + * search range: ref_entry to end. + * - If reference entry is in hprio zone and if + * request can be accommodated in non-hprio zone then + * search range: 'start of middle zone' to 'end' + * - else search in reverse, so that fewer hprio + * zone entries are allocated. + */ + + *reverse = false; + *start = req->ref_entry + 1; + *end = mcam->bmap_entries; + + if (req->ref_entry >= mcam->hprio_end) + return; + + fcnt = npc_mcam_get_free_count(mcam->bmap, + mcam->hprio_end, mcam->bmap_entries); + if (fcnt > req->count) + *start = mcam->hprio_end; + else + *reverse = true; + return; + +hprio: + /* For a high priority entry allocation, search is always + * in reverse to preserve hprio zone entries. + * - If reference entry is not in lprio zone then + * search range: 0 to ref_entry. + * - If reference entry is in lprio zone and if + * request can be accommodated in middle zone then + * search range: 'hprio_end' to 'lprio_start' + */ + + *reverse = true; + *start = 0; + *end = req->ref_entry; + + if (req->ref_entry <= mcam->lprio_start) + return; + + fcnt = npc_mcam_get_free_count(mcam->bmap, + mcam->hprio_end, mcam->lprio_start); + if (fcnt < req->count) + return; + *start = mcam->hprio_end; + *end = mcam->lprio_start; +} + +static int npc_mcam_alloc_entries(struct npc_mcam *mcam, u16 pcifunc, + struct npc_mcam_alloc_entry_req *req, + struct npc_mcam_alloc_entry_rsp *rsp) +{ + u16 entry_list[NPC_MAX_NONCONTIG_ENTRIES]; + u16 fcnt, hp_fcnt, lp_fcnt; + u16 start, end, index; + int entry, next_start; + bool reverse = false; + unsigned long *bmap; + u16 max_contig; + + mutex_lock(&mcam->lock); + + /* Check if there are any free entries */ + if (!mcam->bmap_fcnt) { + mutex_unlock(&mcam->lock); + return NPC_MCAM_ALLOC_FAILED; + } + + /* MCAM entries are divided into high priority, middle and + * low priority zones. The idea is to avoid allocating the top-most + * and bottom-most entries as much as possible; this increases the + * probability of honouring priority allocation requests.
+ * + * Two bitmaps are used for mcam entry management, + * mcam->bmap for forward search, i.e. '0 to mcam->bmap_entries', + * mcam->bmap_reverse for reverse search, i.e. 'mcam->bmap_entries to 0'. + * + * The reverse bitmap is used to allocate entries + * - when a higher priority entry is requested + * - when available free entries are few. + * Lower priority ones out of the available free entries are always + * chosen when the 'high vs low' question arises. + */ + + /* Get the search range for a priority allocation request */ + if (req->priority) { + npc_get_mcam_search_range_priority(mcam, req, + &start, &end, &reverse); + goto alloc; + } + + /* Find out the search range for a non-priority allocation request + * + * Get MCAM free entry count in middle zone. + */ + lp_fcnt = npc_mcam_get_free_count(mcam->bmap, + mcam->lprio_start, + mcam->bmap_entries); + hp_fcnt = npc_mcam_get_free_count(mcam->bmap, 0, mcam->hprio_end); + fcnt = mcam->bmap_fcnt - lp_fcnt - hp_fcnt; + + /* Check if request can be accommodated in the middle zone */ + if (fcnt > req->count) { + start = mcam->hprio_end; + end = mcam->lprio_start; + } else if ((fcnt + (hp_fcnt / 2) + (lp_fcnt / 2)) > req->count) { + /* Expand search zone from half of hprio zone to + * half of lprio zone. + */ + start = mcam->hprio_end / 2; + end = mcam->bmap_entries - (mcam->lprio_count / 2); + reverse = true; + } else { + /* Not enough free entries, search all entries in reverse, + * so that low priority ones will get used up. + */ + reverse = true; + start = 0; + end = mcam->bmap_entries; + } + +alloc: + if (reverse) { + bmap = mcam->bmap_reverse; + start = mcam->bmap_entries - start; + end = mcam->bmap_entries - end; + index = start; + start = end; + end = index; + } else { + bmap = mcam->bmap; + } + + if (req->contig) { + /* Allocate requested number of contiguous entries, if + * unsuccessful find max contiguous entries available. + */ + index = npc_mcam_find_zero_area(bmap, end, start, + req->count, &max_contig); + rsp->count = max_contig; + if (reverse) + rsp->entry = mcam->bmap_entries - index - max_contig; + else + rsp->entry = index; + } else { + /* Allocate requested number of non-contiguous entries, + * if unsuccessful allocate as many as possible. + */ + rsp->count = 0; + next_start = start; + for (entry = 0; entry < req->count; entry++) { + index = find_next_zero_bit(bmap, end, next_start); + if (index >= end) + break; + + next_start = start + (index - start) + 1; + + /* Save the entry's index */ + if (reverse) + index = mcam->bmap_entries - index - 1; + entry_list[entry] = index; + rsp->count++; + } + } + + /* If allocating the requested number of entries is unsuccessful, + * expand the search range to the full bitmap length and retry. + */ + if (!req->priority && (rsp->count < req->count) && + ((end - start) != mcam->bmap_entries)) { + reverse = true; + start = 0; + end = mcam->bmap_entries; + goto alloc; + } + + /* For priority entry allocation requests, if allocation + * failed then expand search to max possible range and retry. + */ + if (req->priority && rsp->count < req->count) { + if (req->priority == NPC_MCAM_LOWER_PRIO && + (start != (req->ref_entry + 1))) { + start = req->ref_entry + 1; + end = mcam->bmap_entries; + reverse = false; + goto alloc; + } else if ((req->priority == NPC_MCAM_HIGHER_PRIO) && + ((end - start) != req->ref_entry)) { + start = 0; + end = req->ref_entry; + reverse = true; + goto alloc; + } + } + + /* Copy MCAM entry indices into mbox response entry_list. + * Requester always expects indices in ascending order, so + * reverse the list if the reverse bitmap was used for allocation. + */ + if (!req->contig && rsp->count) { + index = 0; + for (entry = rsp->count - 1; entry >= 0; entry--) { + if (reverse) + rsp->entry_list[index++] = entry_list[entry]; + else + rsp->entry_list[entry] = entry_list[entry]; + } + } + + /* Mark the allocated entries as used and set nixlf mapping */ + for (entry = 0; entry < rsp->count; entry++) { + index = req->contig ? + (rsp->entry + entry) : rsp->entry_list[entry]; + npc_mcam_set_bit(mcam, index); + mcam->entry2pfvf_map[index] = pcifunc; + mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; + } + + /* Update available free count in mbox response */ + rsp->free_count = mcam->bmap_fcnt; + + mutex_unlock(&mcam->lock); + return 0; +}
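The forward/reverse bitmap scheme documented at the top of npc_mcam_alloc_entries() turns "allocate from the high-index end" into an ordinary forward search over a mirrored bitmap. A toy user-space sketch of just that index arithmetic (hypothetical helpers, 16 entries instead of the real MCAM sizes):

#include <stdio.h>

#define NBITS 16	/* toy size; the real code uses mcam->bmap_entries */

static unsigned int fwd_map, rev_map;

/* Mirror of npc_mcam_set_bit(): bit i in the forward map and bit
 * (NBITS - i - 1) in the reverse map always describe the same entry.
 */
static void toy_set(int i)
{
	fwd_map |= 1u << i;
	rev_map |= 1u << (NBITS - i - 1);
}

/* Forward-only search, like find_next_zero_bit() starting at bit 0 */
static int toy_first_zero(unsigned int map)
{
	for (int i = 0; i < NBITS; i++)
		if (!(map & (1u << i)))
			return i;
	return -1;
}

int main(void)
{
	toy_set(0);
	toy_set(15);
	/* Lowest free entry: search the forward map */
	printf("forward alloc -> entry %d\n", toy_first_zero(fwd_map));
	/* Highest free entry: search the mirrored map, then convert back */
	printf("reverse alloc -> entry %d\n",
	       NBITS - toy_first_zero(rev_map) - 1);
	return 0;
}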
+ +int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, + struct npc_mcam_alloc_entry_req *req, + struct npc_mcam_alloc_entry_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + rsp->entry = NPC_MCAM_ENTRY_INVALID; + rsp->free_count = 0; + + /* Check if ref_entry is within range */ + if (req->priority && req->ref_entry >= mcam->bmap_entries) + return NPC_MCAM_INVALID_REQ; + + /* ref_entry can't be '0' if requested priority is high. + * Can't be last entry if requested priority is low. + */ + if ((!req->ref_entry && req->priority == NPC_MCAM_HIGHER_PRIO) || + ((req->ref_entry == (mcam->bmap_entries - 1)) && + req->priority == NPC_MCAM_LOWER_PRIO)) + return NPC_MCAM_INVALID_REQ; + + /* Since the list of allocated indices needs to be sent to the + * requester, the max number of non-contiguous entries per mbox msg + * is limited.
+ */ + if (!req->contig && req->count > NPC_MAX_NONCONTIG_ENTRIES) + return NPC_MCAM_INVALID_REQ; + + /* Alloc request from PFFUNC with no NIXLF attached should be denied */ + if (!is_nixlf_attached(rvu, pcifunc)) + return NPC_MCAM_ALLOC_DENIED; + + return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); +} + +int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, + struct npc_mcam_free_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc = 0; + u16 cntr; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + /* Free request from PFFUNC with no NIXLF attached, ignore */ + if (!is_nixlf_attached(rvu, pcifunc)) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + + if (req->all) + goto free_all; + + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + if (rc) + goto exit; + + mcam->entry2pfvf_map[req->entry] = 0; + npc_mcam_clear_bit(mcam, req->entry); + npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); + + /* Update entry2counter mapping */ + cntr = mcam->entry2cntr_map[req->entry]; + if (cntr != NPC_MCAM_INVALID_MAP) + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + req->entry, cntr); + + goto exit; + +free_all: + /* Free up all entries allocated to requesting PFFUNC */ + npc_mcam_free_all_entries(rvu, mcam, blkaddr, pcifunc); +exit: + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, + struct npc_mcam_write_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + if (rc) + goto exit; + + if (req->set_cntr && + npc_mcam_verify_counter(mcam, pcifunc, req->cntr)) { + rc = NPC_MCAM_INVALID_REQ; + goto exit; + } + + if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) { + rc = NPC_MCAM_INVALID_REQ; + goto exit; + } + + npc_config_mcam_entry(rvu, mcam, blkaddr, req->entry, req->intf, + &req->entry_data, req->enable_entry); + + if (req->set_cntr) + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, + req->entry, req->cntr); + + rc = 0; +exit: + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_ena_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + mutex_unlock(&mcam->lock); + if (rc) + return rc; + + npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, true); + + return 0; +} + +int rvu_mbox_handler_npc_mcam_dis_entry(struct rvu *rvu, + struct npc_mcam_ena_dis_entry_req *req, + struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_entry(mcam, pcifunc, req->entry); + mutex_unlock(&mcam->lock); + if (rc) + return rc; + + npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); + + return 0; +} + +int rvu_mbox_handler_npc_mcam_shift_entry(struct rvu 
*rvu, + struct npc_mcam_shift_entry_req *req, + struct npc_mcam_shift_entry_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + u16 old_entry, new_entry; + u16 index, cntr; + int blkaddr, rc = 0; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + if (req->shift_count > NPC_MCAM_MAX_SHIFTS) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + for (index = 0; index < req->shift_count; index++) { + old_entry = req->curr_entry[index]; + new_entry = req->new_entry[index]; + + /* Check whether both old and new entries are valid and + * belong to this PFFUNC. + */ + rc = npc_mcam_verify_entry(mcam, pcifunc, old_entry); + if (rc) + break; + + rc = npc_mcam_verify_entry(mcam, pcifunc, new_entry); + if (rc) + break; + + /* new_entry should not have a counter mapped */ + if (mcam->entry2cntr_map[new_entry] != NPC_MCAM_INVALID_MAP) { + rc = NPC_MCAM_PERM_DENIED; + break; + } + + /* Disable the new_entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, false); + + /* Copy rule from old entry to new entry */ + npc_copy_mcam_entry(rvu, mcam, blkaddr, old_entry, new_entry); + + /* Copy counter mapping, if any */ + cntr = mcam->entry2cntr_map[old_entry]; + if (cntr != NPC_MCAM_INVALID_MAP) { + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + old_entry, cntr); + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, + new_entry, cntr); + } + + /* Enable new_entry and disable old_entry */ + npc_enable_mcam_entry(rvu, mcam, blkaddr, new_entry, true); + npc_enable_mcam_entry(rvu, mcam, blkaddr, old_entry, false); + } + + /* If shift has failed then report the failed index */ + if (index != req->shift_count) { + rc = NPC_MCAM_PERM_DENIED; + rsp->failed_entry_idx = index; + } + + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, + struct npc_mcam_alloc_counter_req *req, + struct npc_mcam_alloc_counter_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 pcifunc = req->hdr.pcifunc; + u16 max_contig, cntr; + int blkaddr, index; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + /* If the request is from a PFFUNC with no NIXLF attached, ignore */ + if (!is_nixlf_attached(rvu, pcifunc)) + return NPC_MCAM_INVALID_REQ; + + /* Since the list of allocated counter IDs needs to be sent to the + * requester, the max number of non-contiguous counters per mbox msg + * is limited. + */ + if (!req->contig && req->count > NPC_MAX_NONCONTIG_COUNTERS) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + + /* Check if unused counters are available or not */ + if (!rvu_rsrc_free_count(&mcam->counters)) { + mutex_unlock(&mcam->lock); + return NPC_MCAM_ALLOC_FAILED; + } + + rsp->count = 0; + + if (req->contig) { + /* Allocate requested number of contiguous counters, if + * unsuccessful find max contiguous entries available. + */ + index = npc_mcam_find_zero_area(mcam->counters.bmap, + mcam->counters.max, 0, + req->count, &max_contig); + rsp->count = max_contig; + rsp->cntr = index; + for (cntr = index; cntr < (index + max_contig); cntr++) { + __set_bit(cntr, mcam->counters.bmap); + mcam->cntr2pfvf_map[cntr] = pcifunc; + } + } else { + /* Allocate requested number of non-contiguous counters, + * if unsuccessful allocate as many as possible.
+ */ + for (cntr = 0; cntr < req->count; cntr++) { + index = rvu_alloc_rsrc(&mcam->counters); + if (index < 0) + break; + rsp->cntr_list[cntr] = index; + rsp->count++; + mcam->cntr2pfvf_map[index] = pcifunc; + } + } + + mutex_unlock(&mcam->lock); + return 0; +} + +int rvu_mbox_handler_npc_mcam_free_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 index, entry = 0; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + if (err) { + mutex_unlock(&mcam->lock); + return err; + } + + /* Mark counter as free/unused */ + mcam->cntr2pfvf_map[req->cntr] = NPC_MCAM_INVALID_MAP; + rvu_free_rsrc(&mcam->counters, req->cntr); + + /* Disable stats of all MCAM entries using this counter */ + while (entry < mcam->bmap_entries) { + if (!mcam->cntr_refcnt[req->cntr]) + break; + + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); + if (index >= mcam->bmap_entries) + break; + + /* Advance past this entry first, so a mismatch cannot + * rescan the same bit forever. + */ + entry = index + 1; + if (mcam->entry2cntr_map[index] != req->cntr) + continue; + + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + index, req->cntr); + } + + mutex_unlock(&mcam->lock); + return 0; +} + +int rvu_mbox_handler_npc_mcam_unmap_counter(struct rvu *rvu, + struct npc_mcam_unmap_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 index, entry = 0; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + rc = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + if (rc) + goto exit; + + /* Unmap the MCAM entry and counter */ + if (!req->all) { + rc = npc_mcam_verify_entry(mcam, req->hdr.pcifunc, req->entry); + if (rc) + goto exit; + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + req->entry, req->cntr); + goto exit; + } + + /* Disable stats of all MCAM entries using this counter */ + while (entry < mcam->bmap_entries) { + if (!mcam->cntr_refcnt[req->cntr]) + break; + + index = find_next_bit(mcam->bmap, mcam->bmap_entries, entry); + if (index >= mcam->bmap_entries) + break; + + /* Advance past this entry first, so a mismatch cannot + * rescan the same bit forever. + */ + entry = index + 1; + if (mcam->entry2cntr_map[index] != req->cntr) + continue; + + npc_unmap_mcam_entry_and_cntr(rvu, mcam, blkaddr, + index, req->cntr); + } +exit: + mutex_unlock(&mcam->lock); + return rc; +} + +int rvu_mbox_handler_npc_mcam_clear_counter(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, struct msg_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + mutex_unlock(&mcam->lock); + if (err) + return err; + + rvu_write64(rvu, blkaddr, NPC_AF_MATCH_STATX(req->cntr), 0x00); + + return 0; +} + +int rvu_mbox_handler_npc_mcam_counter_stats(struct rvu *rvu, + struct npc_mcam_oper_counter_req *req, + struct npc_mcam_oper_counter_rsp *rsp) +{ + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, err; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + mutex_lock(&mcam->lock); + err = npc_mcam_verify_counter(mcam, req->hdr.pcifunc, req->cntr); + mutex_unlock(&mcam->lock); + if (err) + return err; + + rsp->stat = rvu_read64(rvu, blkaddr,
NPC_AF_MATCH_STATX(req->cntr)); + rsp->stat &= BIT_ULL(48) - 1; + + return 0; +} + +int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, + struct npc_mcam_alloc_and_write_entry_req *req, + struct npc_mcam_alloc_and_write_entry_rsp *rsp) +{ + struct npc_mcam_alloc_counter_req cntr_req; + struct npc_mcam_alloc_counter_rsp cntr_rsp; + struct npc_mcam_alloc_entry_req entry_req; + struct npc_mcam_alloc_entry_rsp entry_rsp; + struct npc_mcam *mcam = &rvu->hw->mcam; + u16 entry = NPC_MCAM_ENTRY_INVALID; + u16 cntr = NPC_MCAM_ENTRY_INVALID; + int blkaddr, rc; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NPC_MCAM_INVALID_REQ; + + if (req->intf != NIX_INTF_RX && req->intf != NIX_INTF_TX) + return NPC_MCAM_INVALID_REQ; + + /* Try to allocate a MCAM entry */ + entry_req.hdr.pcifunc = req->hdr.pcifunc; + entry_req.contig = true; + entry_req.priority = req->priority; + entry_req.ref_entry = req->ref_entry; + entry_req.count = 1; + + rc = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, + &entry_req, &entry_rsp); + if (rc) + return rc; + + if (!entry_rsp.count) + return NPC_MCAM_ALLOC_FAILED; + + entry = entry_rsp.entry; + + if (!req->alloc_cntr) + goto write_entry; + + /* Now allocate counter */ + cntr_req.hdr.pcifunc = req->hdr.pcifunc; + cntr_req.contig = true; + cntr_req.count = 1; + + rc = rvu_mbox_handler_npc_mcam_alloc_counter(rvu, &cntr_req, &cntr_rsp); + if (rc) { + /* Free allocated MCAM entry */ + mutex_lock(&mcam->lock); + mcam->entry2pfvf_map[entry] = 0; + npc_mcam_clear_bit(mcam, entry); + mutex_unlock(&mcam->lock); + return rc; + } + + cntr = cntr_rsp.cntr; + +write_entry: + mutex_lock(&mcam->lock); + npc_config_mcam_entry(rvu, mcam, blkaddr, entry, req->intf, + &req->entry_data, req->enable_entry); + + if (req->alloc_cntr) + npc_map_mcam_entry_and_cntr(rvu, mcam, blkaddr, entry, cntr); + mutex_unlock(&mcam->lock); + + rsp->entry = entry; + rsp->cntr = cntr; + + return 0; +} + +#define GET_KEX_CFG(intf) \ + rvu_read64(rvu, BLKADDR_NPC, NPC_AF_INTFX_KEX_CFG(intf)) + +#define GET_KEX_FLAGS(ld) \ + rvu_read64(rvu, BLKADDR_NPC, NPC_AF_KEX_LDATAX_FLAGS_CFG(ld)) + +#define GET_KEX_LD(intf, lid, lt, ld) \ + rvu_read64(rvu, BLKADDR_NPC, \ + NPC_AF_INTFX_LIDX_LTX_LDX_CFG(intf, lid, lt, ld)) + +#define GET_KEX_LDFLAGS(intf, ld, fl) \ + rvu_read64(rvu, BLKADDR_NPC, \ + NPC_AF_INTFX_LDATAX_FLAGSX_CFG(intf, ld, fl)) + +int rvu_mbox_handler_npc_get_kex_cfg(struct rvu *rvu, struct msg_req *req, + struct npc_get_kex_cfg_rsp *rsp) +{ + int lid, lt, ld, fl; + + rsp->rx_keyx_cfg = GET_KEX_CFG(NIX_INTF_RX); + rsp->tx_keyx_cfg = GET_KEX_CFG(NIX_INTF_TX); + for (lid = 0; lid < NPC_MAX_LID; lid++) { + for (lt = 0; lt < NPC_MAX_LT; lt++) { + for (ld = 0; ld < NPC_MAX_LD; ld++) { + rsp->intf_lid_lt_ld[NIX_INTF_RX][lid][lt][ld] = + GET_KEX_LD(NIX_INTF_RX, lid, lt, ld); + rsp->intf_lid_lt_ld[NIX_INTF_TX][lid][lt][ld] = + GET_KEX_LD(NIX_INTF_TX, lid, lt, ld); + } + } + } + for (ld = 0; ld < NPC_MAX_LD; ld++) + rsp->kex_ld_flags[ld] = GET_KEX_FLAGS(ld); + + for (ld = 0; ld < NPC_MAX_LD; ld++) { + for (fl = 0; fl < NPC_MAX_LFL; fl++) { + rsp->intf_ld_flags[NIX_INTF_RX][ld][fl] = + GET_KEX_LDFLAGS(NIX_INTF_RX, ld, fl); + rsp->intf_ld_flags[NIX_INTF_TX][ld][fl] = + GET_KEX_LDFLAGS(NIX_INTF_TX, ld, fl); + } + } + memcpy(rsp->mkex_pfl_name, rvu->mkex_pfl_name, MKEX_NAME_LEN); + return 0; +} + +int rvu_npc_update_rxvlan(struct rvu *rvu, u16 pcifunc, int nixlf) +{ + struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); + struct npc_mcam *mcam = &rvu->hw->mcam; + int blkaddr, index; 
+ bool enable; + + blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPC, 0); + if (blkaddr < 0) + return NIX_AF_ERR_AF_LF_INVALID; + + if (!pfvf->rxvlan) + return 0; + + index = npc_get_nixlf_mcam_index(mcam, pcifunc, nixlf, + NIXLF_UCAST_ENTRY); + pfvf->entry.action = npc_get_mcam_action(rvu, mcam, blkaddr, index); + enable = is_mcam_entry_enabled(rvu, mcam, blkaddr, index); + npc_config_mcam_entry(rvu, mcam, blkaddr, pfvf->rxvlan_index, + NIX_INTF_RX, &pfvf->entry, enable); + + return 0; } diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c index 9c08c3650c02..04fd1f135011 100644 --- a/drivers/net/ethernet/marvell/skge.c +++ b/drivers/net/ethernet/marvell/skge.c @@ -3732,19 +3732,7 @@ static int skge_debug_show(struct seq_file *seq, void *v) return 0; } - -static int skge_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, skge_debug_show, inode->i_private); -} - -static const struct file_operations skge_debug_fops = { - .owner = THIS_MODULE, - .open = skge_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(skge_debug); /* * Use network device events to create/remove/rename diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c index 697d9b374f5e..f3a5fa84860f 100644 --- a/drivers/net/ethernet/marvell/sky2.c +++ b/drivers/net/ethernet/marvell/sky2.c @@ -2485,13 +2485,11 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2, skb->ip_summed = re->skb->ip_summed; skb->csum = re->skb->csum; skb_copy_hash(skb, re->skb); - skb->vlan_proto = re->skb->vlan_proto; - skb->vlan_tci = re->skb->vlan_tci; + __vlan_hwaccel_copy_tag(skb, re->skb); pci_dma_sync_single_for_device(sky2->hw->pdev, re->data_addr, length, PCI_DMA_FROMDEVICE); - re->skb->vlan_proto = 0; - re->skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(re->skb); skb_clear_hash(re->skb); re->skb->ip_summed = CHECKSUM_NONE; skb_put(skb, length); @@ -4623,19 +4621,7 @@ static int sky2_debug_show(struct seq_file *seq, void *v) napi_enable(&hw->napi); return 0; } - -static int sky2_debug_open(struct inode *inode, struct file *file) -{ - return single_open(file, sky2_debug_show, inode->i_private); -} - -static const struct file_operations sky2_debug_fops = { - .owner = THIS_MODULE, - .open = sky2_debug_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(sky2_debug); /* * Use network device events to create/remove/rename diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 7dbfdac4067a..399f565dd85a 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -243,7 +243,7 @@ static void mtk_phy_link_adjust(struct net_device *dev) if (dev->phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(dev->phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(dev->phydev->advertising); flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (flowctrl & FLOW_CTRL_TX) @@ -353,8 +353,9 @@ static int mtk_phy_connect(struct net_device *dev) phy_set_max_speed(dev->phydev, SPEED_1000); phy_support_asym_pause(dev->phydev); - dev->phydev->advertising = dev->phydev->supported | - ADVERTISED_Autoneg; + linkmode_copy(dev->phydev->advertising, dev->phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + dev->phydev->advertising); phy_start_aneg(dev->phydev); of_node_put(np); diff --git 
a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c index d8e9a323122e..db909b6069b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/cq.c @@ -144,9 +144,9 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type) } static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox, - int cq_num) + int cq_num, u8 opmod) { - return mlx4_cmd(dev, mailbox->dma, cq_num, 0, + return mlx4_cmd(dev, mailbox->dma, cq_num, opmod, MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED); } @@ -287,11 +287,61 @@ static void mlx4_cq_free_icm(struct mlx4_dev *dev, int cqn) __mlx4_cq_free_icm(dev, cqn); } +static int mlx4_init_user_cqes(void *buf, int entries, int cqe_size) +{ + int entries_per_copy = PAGE_SIZE / cqe_size; + void *init_ents; + int err = 0; + int i; + + init_ents = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!init_ents) + return -ENOMEM; + + /* Populate a list of CQ entries to reduce the number of + * copy_to_user calls. 0xcc is the initialization value + * required by the FW. + */ + memset(init_ents, 0xcc, PAGE_SIZE); + + if (entries_per_copy < entries) { + for (i = 0; i < entries / entries_per_copy; i++) { + err = copy_to_user(buf, init_ents, PAGE_SIZE); + if (err) + goto out; + + buf += PAGE_SIZE; + } + } else { + err = copy_to_user(buf, init_ents, entries * cqe_size); + } + +out: + kfree(init_ents); + + return err; +} + +static void mlx4_init_kernel_cqes(struct mlx4_buf *buf, + int entries, + int cqe_size) +{ + int i; + + if (buf->nbufs == 1) + memset(buf->direct.buf, 0xcc, entries * cqe_size); + else + for (i = 0; i < buf->npages; i++) + memset(buf->page_list[i].buf, 0xcc, + 1UL << buf->page_shift); +} + int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, struct mlx4_cq *cq, unsigned vector, int collapsed, - int timestamp_en) + int timestamp_en, void *buf_addr, bool user_cq) { + bool sw_cq_init = dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SW_CQ_INIT; struct mlx4_priv *priv = mlx4_priv(dev); struct mlx4_cq_table *cq_table = &priv->cq_table; struct mlx4_cmd_mailbox *mailbox; @@ -336,7 +386,20 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, cq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff); cq_context->db_rec_addr = cpu_to_be64(db_rec); - err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn); + if (sw_cq_init) { + if (user_cq) { + err = mlx4_init_user_cqes(buf_addr, nent, + dev->caps.cqe_size); + if (err) + sw_cq_init = false; + } else { + mlx4_init_kernel_cqes(buf_addr, nent, + dev->caps.cqe_size); + } + } + + err = mlx4_SW2HW_CQ(dev, mailbox, cq->cqn, sw_cq_init); + mlx4_free_cmd_mailbox(dev, mailbox); if (err) goto err_radix; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c index 1e487acb4667..74d466796b7c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c @@ -54,11 +54,8 @@ int mlx4_en_create_cq(struct mlx4_en_priv *priv, cq = kzalloc_node(sizeof(*cq), GFP_KERNEL, node); if (!cq) { - cq = kzalloc(sizeof(*cq), GFP_KERNEL); - if (!cq) { - en_err(priv, "Failed to allocate CQ structure\n"); - return -ENOMEM; - } + en_err(priv, "Failed to allocate CQ structure\n"); + return -ENOMEM; } cq->size = entries; @@ -143,7 +140,7 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq, cq->mcq.usage = MLX4_RES_USAGE_DRIVER; err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar, cq->wqres.db.dma, 
&cq->mcq, - cq->vector, 0, timestamp_en); + cq->vector, 0, timestamp_en, &cq->wqres.buf, false); if (err) goto free_eq; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c index db00bf1c23f5..9a0881cb7f51 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c @@ -271,11 +271,8 @@ int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv, ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); if (!ring) { - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) { - en_err(priv, "Failed to allocate RX ring structure\n"); - return -ENOMEM; - } + en_err(priv, "Failed to allocate RX ring structure\n"); + return -ENOMEM; } ring->prod = 0; @@ -875,7 +872,7 @@ csum_none: skb->data_len = length; napi_gro_frags(&cq->napi); } else { - skb->vlan_tci = 0; + __vlan_hwaccel_clear_tag(skb); skb_clear_hash(skb); } next: diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c index 6f5153afcab4..2cbd2bd7c67c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c @@ -57,11 +57,8 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv, ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, node); if (!ring) { - ring = kzalloc(sizeof(*ring), GFP_KERNEL); - if (!ring) { - en_err(priv, "Failed allocating TX ring\n"); - return -ENOMEM; - } + en_err(priv, "Failed allocating TX ring\n"); + return -ENOMEM; } ring->size = size; diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index babcfd9c0571..7df728f1e5b5 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -166,6 +166,7 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [37] = "sl to vl mapping table change event support", [38] = "user MAC support", [39] = "Report driver version to FW support", + [40] = "SW CQ initialization support", }; int i; @@ -1098,6 +1099,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_FSM; if (field32 & (1 << 21)) dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_80_VFS; + if (field32 & (1 << 23)) + dev_cap->flags2 |= MLX4_DEV_CAP_FLAG2_SW_CQ_INIT; for (i = 1; i <= dev_cap->num_ports; i++) { err = mlx4_QUERY_PORT(dev, i, dev_cap->port_cap + i); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 6a046030e873..bdb8dd161923 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -63,7 +63,7 @@ struct workqueue_struct *mlx4_wq; #ifdef CONFIG_MLX4_DEBUG -int mlx4_debug_level = 0; +int mlx4_debug_level; /* 0 by default */ module_param_named(debug_level, mlx4_debug_level, int, 0644); MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0"); @@ -83,7 +83,7 @@ MODULE_PARM_DESC(msi_x, "0 - don't use MSI-X, 1 - use MSI-X, >1 - limit number o static uint8_t num_vfs[3] = {0, 0, 0}; static int num_vfs_argc; -module_param_array(num_vfs, byte , &num_vfs_argc, 0444); +module_param_array(num_vfs, byte, &num_vfs_argc, 0444); MODULE_PARM_DESC(num_vfs, "enable #num_vfs functions if num_vfs > 0\n" "num_vfs=port1,port2,port1+2"); @@ -313,7 +313,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev, for (i = 0; i < dev->caps.num_ports - 1; i++) { if (port_type[i] != port_type[i + 1]) { mlx4_err(dev, "Only same port types supported on this HCA, aborting\n"); - return -EINVAL; + return -EOPNOTSUPP; } } } @@ -322,7 
+322,7 @@ int mlx4_check_port_params(struct mlx4_dev *dev, if (!(port_type[i] & dev->caps.supported_type[i+1])) { mlx4_err(dev, "Requested port type for port %d is not supported on this HCA\n", i + 1); - return -EINVAL; + return -EOPNOTSUPP; } } return 0; @@ -1188,8 +1188,7 @@ static int __set_port_type(struct mlx4_port_info *info, mlx4_err(mdev, "Requested port type for port %d is not supported on this HCA\n", info->port); - err = -EINVAL; - goto err_sup; + return -EOPNOTSUPP; } mlx4_stop_sense(mdev); @@ -1211,7 +1210,7 @@ static int __set_port_type(struct mlx4_port_info *info, for (i = 1; i <= mdev->caps.num_ports; i++) { if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) { mdev->caps.possible_type[i] = mdev->caps.port_type[i]; - err = -EINVAL; + err = -EOPNOTSUPP; } } } @@ -1237,7 +1236,7 @@ static int __set_port_type(struct mlx4_port_info *info, out: mlx4_start_sense(mdev); mutex_unlock(&priv->port_mutex); -err_sup: + return err; } @@ -3252,7 +3251,7 @@ disable_sriov: free_mem: dev->persist->num_vfs = 0; kfree(dev->dev_vfs); - dev->dev_vfs = NULL; + dev->dev_vfs = NULL; return dev_flags & ~MLX4_FLAG_MASTER; } diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c index 31bd56727022..eb13d3618162 100644 --- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c +++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c @@ -4729,7 +4729,6 @@ static void rem_slave_srqs(struct mlx4_dev *dev, int slave) struct res_srq *tmp; int state; u64 in_param; - LIST_HEAD(tlist); int srqn; int err; @@ -4795,7 +4794,6 @@ static void rem_slave_cqs(struct mlx4_dev *dev, int slave) struct res_cq *tmp; int state; u64 in_param; - LIST_HEAD(tlist); int cqn; int err; @@ -4858,7 +4856,6 @@ static void rem_slave_mrs(struct mlx4_dev *dev, int slave) struct res_mpt *tmp; int state; u64 in_param; - LIST_HEAD(tlist); int mptn; int err; @@ -4926,7 +4923,6 @@ static void rem_slave_mtts(struct mlx4_dev *dev, int slave) struct res_mtt *mtt; struct res_mtt *tmp; int state; - LIST_HEAD(tlist); int base; int err; @@ -5115,7 +5111,6 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave) struct res_eq *tmp; int err; int state; - LIST_HEAD(tlist); int eqn; err = move_all_busy(dev, slave, RES_EQ); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Makefile b/drivers/net/ethernet/mellanox/mlx5/core/Makefile index d324a3884462..9de9abacf7f6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/Makefile +++ b/drivers/net/ethernet/mellanox/mlx5/core/Makefile @@ -12,17 +12,17 @@ obj-$(CONFIG_MLX5_CORE) += mlx5_core.o # mlx5 core basic # mlx5_core-y := main.o cmd.o debugfs.o fw.o eq.o uar.o pagealloc.o \ - health.o mcg.o cq.o srq.o alloc.o qp.o port.o mr.o pd.o \ + health.o mcg.o cq.o alloc.o qp.o port.o mr.o pd.o \ mad.o transobj.o vport.o sriov.o fs_cmd.o fs_core.o \ - fs_counters.o rl.o lag.o dev.o wq.o lib/gid.o \ - diag/fs_tracepoint.o diag/fw_tracer.o + fs_counters.o rl.o lag.o dev.o events.o wq.o lib/gid.o \ + lib/devcom.o diag/fs_tracepoint.o diag/fw_tracer.o # # Netdev basic # mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ en_tx.o en_rx.o en_dim.o en_txrx.o en/xdp.o en_stats.o \ - en_selftest.o en/port.o + en_selftest.o en/port.o en/monitor_stats.o # # Netdev extra @@ -30,7 +30,7 @@ mlx5_core-$(CONFIG_MLX5_CORE_EN) += en_main.o en_common.o en_fs.o en_ethtool.o \ mlx5_core-$(CONFIG_MLX5_EN_ARFS) += en_arfs.o mlx5_core-$(CONFIG_MLX5_EN_RXNFC) += en_fs_ethtool.o mlx5_core-$(CONFIG_MLX5_CORE_EN_DCB) += en_dcbnl.o 
en/port_buffer.o -mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o +mlx5_core-$(CONFIG_MLX5_ESWITCH) += en_rep.o en_tc.o en/tc_tun.o # # Core extra diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index a5a0823e5ada..d3125cdf69db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -40,9 +40,11 @@ #include <linux/random.h> #include <linux/io-mapping.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/eq.h> #include <linux/debugfs.h> #include "mlx5_core.h" +#include "lib/eq.h" enum { CMD_IF_REV = 5, @@ -313,6 +315,7 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_FPGA_DESTROY_QP: case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT: case MLX5_CMD_OP_DEALLOC_MEMIC: + case MLX5_CMD_OP_PAGE_FAULT_RESUME: return MLX5_CMD_STAT_OK; case MLX5_CMD_OP_QUERY_HCA_CAP: @@ -326,7 +329,6 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_CREATE_MKEY: case MLX5_CMD_OP_QUERY_MKEY: case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS: - case MLX5_CMD_OP_PAGE_FAULT_RESUME: case MLX5_CMD_OP_CREATE_EQ: case MLX5_CMD_OP_QUERY_EQ: case MLX5_CMD_OP_GEN_EQE: @@ -371,6 +373,8 @@ static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op, case MLX5_CMD_OP_QUERY_VPORT_COUNTER: case MLX5_CMD_OP_ALLOC_Q_COUNTER: case MLX5_CMD_OP_QUERY_Q_COUNTER: + case MLX5_CMD_OP_SET_MONITOR_COUNTER: + case MLX5_CMD_OP_ARM_MONITOR_COUNTER: case MLX5_CMD_OP_SET_PP_RATE_LIMIT: case MLX5_CMD_OP_QUERY_RATE_LIMIT: case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT: @@ -520,6 +524,8 @@ const char *mlx5_command_str(int command) MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER); MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER); + MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER); + MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER); MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT); MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT); MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT); @@ -805,6 +811,8 @@ static u16 msg_to_opcode(struct mlx5_cmd_msg *in) return MLX5_GET(mbox_in, in->first.data, opcode); } +static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); + static void cb_timeout_handler(struct work_struct *work) { struct delayed_work *dwork = container_of(work, struct delayed_work, @@ -1412,14 +1420,32 @@ static void mlx5_cmd_change_mod(struct mlx5_core_dev *dev, int mode) up(&cmd->sem); } +static int cmd_comp_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct mlx5_core_dev *dev; + struct mlx5_cmd *cmd; + struct mlx5_eqe *eqe; + + cmd = mlx5_nb_cof(nb, struct mlx5_cmd, nb); + dev = container_of(cmd, struct mlx5_core_dev, cmd); + eqe = data; + + mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); + + return NOTIFY_OK; +} void mlx5_cmd_use_events(struct mlx5_core_dev *dev) { + MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD); + mlx5_eq_notifier_register(dev, &dev->cmd.nb); mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS); } void mlx5_cmd_use_polling(struct mlx5_core_dev *dev) { mlx5_cmd_change_mod(dev, CMD_MODE_POLLING); + mlx5_eq_notifier_unregister(dev, &dev->cmd.nb); } static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) @@ -1435,7 +1461,7 @@ static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg) } } -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) +static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, 
bool forced) { struct mlx5_cmd *cmd = &dev->cmd; struct mlx5_cmd_work_ent *ent; @@ -1533,7 +1559,29 @@ void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced) } } } -EXPORT_SYMBOL(mlx5_cmd_comp_handler); + +void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev) +{ + unsigned long flags; + u64 vector; + + /* wait for pending handlers to complete */ + mlx5_eq_synchronize_cmd_irq(dev); + spin_lock_irqsave(&dev->cmd.alloc_lock, flags); + vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); + if (!vector) + goto no_trig; + + vector |= MLX5_TRIGGERED_CMD_COMP; + spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); + + mlx5_core_dbg(dev, "vector 0x%llx\n", vector); + mlx5_cmd_comp_handler(dev, vector, true); + return; + +no_trig: + spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); +} static int status_to_err(u8 status) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cq.c b/drivers/net/ethernet/mellanox/mlx5/core/cq.c index 4b85abb5c9f7..713a17ee3751 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cq.c @@ -38,6 +38,7 @@ #include <rdma/ib_verbs.h> #include <linux/mlx5/cq.h> #include "mlx5_core.h" +#include "lib/eq.h" #define TASKLET_MAX_TIME 2 #define TASKLET_MAX_TIME_JIFFIES msecs_to_jiffies(TASKLET_MAX_TIME) @@ -92,10 +93,10 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, u32 dout[MLX5_ST_SZ_DW(destroy_cq_out)]; u32 out[MLX5_ST_SZ_DW(create_cq_out)]; u32 din[MLX5_ST_SZ_DW(destroy_cq_in)]; - struct mlx5_eq *eq; + struct mlx5_eq_comp *eq; int err; - eq = mlx5_eqn2eq(dev, eqn); + eq = mlx5_eqn2comp_eq(dev, eqn); if (IS_ERR(eq)) return PTR_ERR(eq); @@ -119,12 +120,12 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, INIT_LIST_HEAD(&cq->tasklet_ctx.list); /* Add to comp EQ CQ tree to recv comp events */ - err = mlx5_eq_add_cq(eq, cq); + err = mlx5_eq_add_cq(&eq->core, cq); if (err) goto err_cmd; /* Add to async EQ CQ tree to recv async events */ - err = mlx5_eq_add_cq(&dev->priv.eq_table.async_eq, cq); + err = mlx5_eq_add_cq(mlx5_get_async_eq(dev), cq); if (err) goto err_cq_add; @@ -139,7 +140,7 @@ int mlx5_core_create_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq, return 0; err_cq_add: - mlx5_eq_del_cq(eq, cq); + mlx5_eq_del_cq(&eq->core, cq); err_cmd: memset(din, 0, sizeof(din)); memset(dout, 0, sizeof(dout)); @@ -157,11 +158,11 @@ int mlx5_core_destroy_cq(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0}; int err; - err = mlx5_eq_del_cq(&dev->priv.eq_table.async_eq, cq); + err = mlx5_eq_del_cq(mlx5_get_async_eq(dev), cq); if (err) return err; - err = mlx5_eq_del_cq(cq->eq, cq); + err = mlx5_eq_del_cq(&cq->eq->core, cq); if (err) return err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 90fabd612b6c..a11e22d0b0cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -36,6 +36,7 @@ #include <linux/mlx5/cq.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" +#include "lib/eq.h" enum { QP_PID, @@ -349,6 +350,16 @@ out: return param; } +static int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, + u32 *out, int outlen) +{ + u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {}; + + MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); + MLX5_SET(query_eq_in, in, eq_number, eq->eqn); + return mlx5_cmd_exec(dev, in, sizeof(in), out, 
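A note on the slot arithmetic in mlx5_cmd_trigger_completions() above: dev->cmd.bitmask keeps one set bit per free command slot, so inverting it and masking to the 2^log_sz implemented slots yields exactly the vector of outstanding commands to force-complete. A self-contained check of that bit manipulation (the values are made up for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        int log_sz = 3;                         /* 1 << 3 == 8 command slots */
        uint64_t slots_mask = (1ull << (1 << log_sz)) - 1;  /* 0xff */
        uint64_t bitmask = 0xf5;                /* free slots; 1 and 3 busy */
        uint64_t vector = ~bitmask & slots_mask;

        assert(vector == 0x0a);         /* bits 1 and 3: outstanding cmds */
        return 0;
}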
outlen); +} + static u64 eq_read_field(struct mlx5_core_dev *dev, struct mlx5_eq *eq, int index) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 37ba7c78859d..ebc046fa97d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -45,75 +45,11 @@ struct mlx5_device_context { unsigned long state; }; -struct mlx5_delayed_event { - struct list_head list; - struct mlx5_core_dev *dev; - enum mlx5_dev_event event; - unsigned long param; -}; - enum { MLX5_INTERFACE_ADDED, MLX5_INTERFACE_ATTACHED, }; -static void add_delayed_event(struct mlx5_priv *priv, - struct mlx5_core_dev *dev, - enum mlx5_dev_event event, - unsigned long param) -{ - struct mlx5_delayed_event *delayed_event; - - delayed_event = kzalloc(sizeof(*delayed_event), GFP_ATOMIC); - if (!delayed_event) { - mlx5_core_err(dev, "event %d is missed\n", event); - return; - } - - mlx5_core_dbg(dev, "Accumulating event %d\n", event); - delayed_event->dev = dev; - delayed_event->event = event; - delayed_event->param = param; - list_add_tail(&delayed_event->list, &priv->waiting_events_list); -} - -static void delayed_event_release(struct mlx5_device_context *dev_ctx, - struct mlx5_priv *priv) -{ - struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv); - struct mlx5_delayed_event *de; - struct mlx5_delayed_event *n; - struct list_head temp; - - INIT_LIST_HEAD(&temp); - - spin_lock_irq(&priv->ctx_lock); - - priv->is_accum_events = false; - list_splice_init(&priv->waiting_events_list, &temp); - if (!dev_ctx->context) - goto out; - list_for_each_entry_safe(de, n, &temp, list) - dev_ctx->intf->event(dev, dev_ctx->context, de->event, de->param); - -out: - spin_unlock_irq(&priv->ctx_lock); - - list_for_each_entry_safe(de, n, &temp, list) { - list_del(&de->list); - kfree(de); - } -} - -/* accumulating events that can come after mlx5_ib calls to - * ib_register_device, till adding that interface to the events list. 
- */ -static void delayed_event_start(struct mlx5_priv *priv) -{ - spin_lock_irq(&priv->ctx_lock); - priv->is_accum_events = true; - spin_unlock_irq(&priv->ctx_lock); -} void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) { @@ -129,8 +65,6 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) dev_ctx->intf = intf; - delayed_event_start(priv); - dev_ctx->context = intf->add(dev); if (dev_ctx->context) { set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); @@ -139,22 +73,9 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv) spin_lock_irq(&priv->ctx_lock); list_add_tail(&dev_ctx->list, &priv->ctx_list); - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (dev_ctx->intf->pfault) { - if (priv->pfault) { - mlx5_core_err(dev, "multiple page fault handlers not supported"); - } else { - priv->pfault_ctx = dev_ctx->context; - priv->pfault = dev_ctx->intf->pfault; - } - } -#endif spin_unlock_irq(&priv->ctx_lock); } - delayed_event_release(dev_ctx, priv); - if (!dev_ctx->context) kfree(dev_ctx); } @@ -179,15 +100,6 @@ void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv) if (!dev_ctx) return; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - spin_lock_irq(&priv->ctx_lock); - if (priv->pfault == dev_ctx->intf->pfault) - priv->pfault = NULL; - spin_unlock_irq(&priv->ctx_lock); - - synchronize_srcu(&priv->pfault_srcu); -#endif - spin_lock_irq(&priv->ctx_lock); list_del(&dev_ctx->list); spin_unlock_irq(&priv->ctx_lock); @@ -207,26 +119,20 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv if (!dev_ctx) return; - delayed_event_start(priv); if (intf->attach) { if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state)) - goto out; + return; if (intf->attach(dev, dev_ctx->context)) - goto out; - + return; set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state); } else { if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state)) - goto out; + return; dev_ctx->context = intf->add(dev); if (!dev_ctx->context) - goto out; - + return; set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state); } - -out: - delayed_event_release(dev_ctx, priv); } void mlx5_attach_device(struct mlx5_core_dev *dev) @@ -350,28 +256,6 @@ void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol) mutex_unlock(&mlx5_intf_mutex); } -void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol) -{ - struct mlx5_priv *priv = &mdev->priv; - struct mlx5_device_context *dev_ctx; - unsigned long flags; - void *result = NULL; - - spin_lock_irqsave(&priv->ctx_lock, flags); - - list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list) - if ((dev_ctx->intf->protocol == protocol) && - dev_ctx->intf->get_dev) { - result = dev_ctx->intf->get_dev(dev_ctx->context); - break; - } - - spin_unlock_irqrestore(&priv->ctx_lock, flags); - - return result; -} -EXPORT_SYMBOL(mlx5_get_protocol_dev); - /* Must be called with intf_mutex held */ void mlx5_add_dev_by_protocol(struct mlx5_core_dev *dev, int protocol) { @@ -422,44 +306,6 @@ struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev) return res; } -void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, - unsigned long param) -{ - struct mlx5_priv *priv = &dev->priv; - struct mlx5_device_context *dev_ctx; - unsigned long flags; - - spin_lock_irqsave(&priv->ctx_lock, flags); - - if (priv->is_accum_events) - add_delayed_event(priv, dev, event, param); - - /* After mlx5_detach_device, the dev_ctx->intf is still set and dev_ctx is - * still in priv->ctx_list. 
In this case, only notify the dev_ctx if its - * ADDED or ATTACHED bit are set. - */ - list_for_each_entry(dev_ctx, &priv->ctx_list, list) - if (dev_ctx->intf->event && - (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state) || - test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))) - dev_ctx->intf->event(dev, dev_ctx->context, event, param); - - spin_unlock_irqrestore(&priv->ctx_lock, flags); -} - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -void mlx5_core_page_fault(struct mlx5_core_dev *dev, - struct mlx5_pagefault *pfault) -{ - struct mlx5_priv *priv = &dev->priv; - int srcu_idx; - - srcu_idx = srcu_read_lock(&priv->pfault_srcu); - if (priv->pfault) - priv->pfault(dev, priv->pfault_ctx, pfault); - srcu_read_unlock(&priv->pfault_srcu, srcu_idx); -} -#endif void mlx5_dev_list_lock(void) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c index 0f11fff32a9b..424457ff9759 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fs_tracepoint.c @@ -161,10 +161,10 @@ static void print_misc_parameters_hdrs(struct trace_seq *p, PRINT_MASKED_VAL(name, p, format); \ } DECLARE_MASK_VAL(u64, gre_key) = { - .m = MLX5_GET(fte_match_set_misc, mask, gre_key_h) << 8 | - MLX5_GET(fte_match_set_misc, mask, gre_key_l), - .v = MLX5_GET(fte_match_set_misc, value, gre_key_h) << 8 | - MLX5_GET(fte_match_set_misc, value, gre_key_l)}; + .m = MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.hi) << 8 | + MLX5_GET(fte_match_set_misc, mask, gre_key.nvgre.lo), + .v = MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.hi) << 8 | + MLX5_GET(fte_match_set_misc, value, gre_key.nvgre.lo)}; PRINT_MASKED_VAL(gre_key, p, "%llu"); PRINT_MASKED_VAL_MISC(u32, source_sqn, source_sqn, p, "%u"); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c index d4ec93bde4de..6999f4486e9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c @@ -30,6 +30,7 @@ * SOFTWARE. */ #define CREATE_TRACE_POINTS +#include "lib/eq.h" #include "fw_tracer.h" #include "fw_tracer_tracepoint.h" @@ -846,9 +847,9 @@ free_tracer: return ERR_PTR(err); } -/* Create HW resources + start tracer - * must be called before Async EQ is created - */ +static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data); + +/* Create HW resources + start tracer */ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) { struct mlx5_core_dev *dev; @@ -874,6 +875,9 @@ int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer) goto err_dealloc_pd; } + MLX5_NB_INIT(&tracer->nb, fw_tracer_event, DEVICE_TRACER); + mlx5_eq_notifier_register(dev, &tracer->nb); + mlx5_fw_tracer_start(tracer); return 0; @@ -883,9 +887,7 @@ err_dealloc_pd: return err; } -/* Stop tracer + Cleanup HW resources - * must be called after Async EQ is destroyed - */ +/* Stop tracer + Cleanup HW resources */ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) { if (IS_ERR_OR_NULL(tracer)) @@ -893,7 +895,7 @@ void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer) mlx5_core_dbg(tracer->dev, "FWTracer: Cleanup, is owner ? 
(%d)\n", tracer->owner); - + mlx5_eq_notifier_unregister(tracer->dev, &tracer->nb); cancel_work_sync(&tracer->ownership_change_work); cancel_work_sync(&tracer->handle_traces_work); @@ -922,12 +924,11 @@ void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer) kfree(tracer); } -void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) +static int fw_tracer_event(struct notifier_block *nb, unsigned long action, void *data) { - struct mlx5_fw_tracer *tracer = dev->tracer; - - if (!tracer) - return; + struct mlx5_fw_tracer *tracer = mlx5_nb_cof(nb, struct mlx5_fw_tracer, nb); + struct mlx5_core_dev *dev = tracer->dev; + struct mlx5_eqe *eqe = data; switch (eqe->sub_type) { case MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE: @@ -942,6 +943,8 @@ void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) mlx5_core_dbg(dev, "FWTracer: Event with unrecognized subtype: sub_type %d\n", eqe->sub_type); } + + return NOTIFY_OK; } EXPORT_TRACEPOINT_SYMBOL(mlx5_fw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h index 0347f2dd5cee..a8b8747f2b61 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.h @@ -55,6 +55,7 @@ struct mlx5_fw_tracer { struct mlx5_core_dev *dev; + struct mlx5_nb nb; bool owner; u8 trc_ver; struct workqueue_struct *work_queue; @@ -170,6 +171,5 @@ struct mlx5_fw_tracer *mlx5_fw_tracer_create(struct mlx5_core_dev *dev); int mlx5_fw_tracer_init(struct mlx5_fw_tracer *tracer); void mlx5_fw_tracer_cleanup(struct mlx5_fw_tracer *tracer); void mlx5_fw_tracer_destroy(struct mlx5_fw_tracer *tracer); -void mlx5_fw_tracer_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 118324802926..8fa8fdd30b85 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -49,6 +49,7 @@ #include <net/switchdev.h> #include <net/xdp.h> #include <linux/net_dim.h> +#include <linux/bits.h> #include "wq.h" #include "mlx5_core.h" #include "en_stats.h" @@ -147,9 +148,6 @@ struct page_pool; MLX5_UMR_MTT_ALIGNMENT)) #define MLX5E_UMR_WQEBBS \ (DIV_ROUND_UP(MLX5E_UMR_WQE_INLINE_SZ, MLX5_SEND_WQE_BB)) -#define MLX5E_ICOSQ_MAX_WQEBBS MLX5E_UMR_WQEBBS - -#define MLX5E_NUM_MAIN_GROUPS 9 #define MLX5E_MSG_LEVEL NETIF_MSG_LINK @@ -178,8 +176,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) { return is_kdump_kernel() ? 
MLX5E_MIN_NUM_CHANNELS : - min_t(int, mdev->priv.eq_table.num_comp_vectors, - MLX5E_MAX_NUM_CHANNELS); + min_t(int, mlx5_comp_vectors_count(mdev), MLX5E_MAX_NUM_CHANNELS); } /* Use this function to get max num channels after netdev was created */ @@ -214,22 +211,24 @@ struct mlx5e_umr_wqe { extern const char mlx5e_self_tests[][ETH_GSTRING_LEN]; enum mlx5e_priv_flag { - MLX5E_PFLAG_RX_CQE_BASED_MODER = (1 << 0), - MLX5E_PFLAG_TX_CQE_BASED_MODER = (1 << 1), - MLX5E_PFLAG_RX_CQE_COMPRESS = (1 << 2), - MLX5E_PFLAG_RX_STRIDING_RQ = (1 << 3), - MLX5E_PFLAG_RX_NO_CSUM_COMPLETE = (1 << 4), + MLX5E_PFLAG_RX_CQE_BASED_MODER, + MLX5E_PFLAG_TX_CQE_BASED_MODER, + MLX5E_PFLAG_RX_CQE_COMPRESS, + MLX5E_PFLAG_RX_STRIDING_RQ, + MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, + MLX5E_PFLAG_XDP_TX_MPWQE, + MLX5E_NUM_PFLAGS, /* Keep last */ }; #define MLX5E_SET_PFLAG(params, pflag, enable) \ do { \ if (enable) \ - (params)->pflags |= (pflag); \ + (params)->pflags |= BIT(pflag); \ else \ - (params)->pflags &= ~(pflag); \ + (params)->pflags &= ~(BIT(pflag)); \ } while (0) -#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (pflag))) +#define MLX5E_GET_PFLAG(params, pflag) (!!((params)->pflags & (BIT(pflag)))) #ifdef CONFIG_MLX5_CORE_EN_DCB #define MLX5E_MAX_BW_ALLOC 100 /* Max percentage of BW allocation */ @@ -247,9 +246,6 @@ struct mlx5e_params { bool lro_en; u32 lro_wqe_sz; u8 tx_min_inline_mode; - u8 rss_hfunc; - u8 toeplitz_hash_key[40]; - u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; bool vlan_strip_disable; bool scatter_fcs_en; bool rx_dim_enabled; @@ -349,7 +345,6 @@ enum { MLX5E_SQ_STATE_IPSEC, MLX5E_SQ_STATE_AM, MLX5E_SQ_STATE_TLS, - MLX5E_SQ_STATE_REDIRECT, }; struct mlx5e_sq_wqe_info { @@ -410,24 +405,51 @@ struct mlx5e_xdp_info { struct mlx5e_dma_info di; }; +struct mlx5e_xdp_info_fifo { + struct mlx5e_xdp_info *xi; + u32 *cc; + u32 *pc; + u32 mask; +}; + +struct mlx5e_xdp_wqe_info { + u8 num_wqebbs; + u8 num_ds; +}; + +struct mlx5e_xdp_mpwqe { + /* Current MPWQE session */ + struct mlx5e_tx_wqe *wqe; + u8 ds_count; + u8 max_ds_count; +}; + +struct mlx5e_xdpsq; +typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq*, + struct mlx5e_xdp_info*); struct mlx5e_xdpsq { /* data path */ /* dirtied @completion */ + u32 xdpi_fifo_cc; u16 cc; bool redirect_flush; /* dirtied @xmit */ - u16 pc ____cacheline_aligned_in_smp; - bool doorbell; + u32 xdpi_fifo_pc ____cacheline_aligned_in_smp; + u16 pc; + struct mlx5_wqe_ctrl_seg *doorbell_cseg; + struct mlx5e_xdp_mpwqe mpwqe; struct mlx5e_cq cq; /* read only */ struct mlx5_wq_cyc wq; struct mlx5e_xdpsq_stats *stats; + mlx5e_fp_xmit_xdp_frame xmit_xdp_frame; struct { - struct mlx5e_xdp_info *xdpi; + struct mlx5e_xdp_wqe_info *wqe_info; + struct mlx5e_xdp_info_fifo xdpi_fifo; } db; void __iomem *uar_map; u32 sqn; @@ -633,7 +655,6 @@ struct mlx5e_channel_stats { } ____cacheline_aligned_in_smp; enum { - MLX5E_STATE_ASYNC_EVENTS_ENABLED, MLX5E_STATE_OPENED, MLX5E_STATE_DESTROYING, }; @@ -654,6 +675,13 @@ enum { MLX5E_NIC_PRIO }; +struct mlx5e_rss_params { + u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; + u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS]; + u8 toeplitz_hash_key[40]; + u8 hfunc; +}; + struct mlx5e_priv { /* priv data path fields - start */ struct mlx5e_txqsq *txq2sq[MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC]; @@ -674,6 +702,7 @@ struct mlx5e_priv { struct mlx5e_tir indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir inner_indir_tir[MLX5E_NUM_INDIR_TIRS]; struct mlx5e_tir direct_tir[MLX5E_MAX_NUM_CHANNELS]; + struct mlx5e_rss_params rss_params; u32 
tx_rates[MLX5E_MAX_NUM_SQS]; struct mlx5e_flow_steering fs; @@ -683,6 +712,8 @@ struct mlx5e_priv { struct work_struct set_rx_mode_work; struct work_struct tx_timeout_work; struct work_struct update_stats_work; + struct work_struct monitor_counters_work; + struct mlx5_nb monitor_counters_nb; struct mlx5_core_dev *mdev; struct net_device *netdev; @@ -692,6 +723,8 @@ struct mlx5e_priv { struct hwtstamp_config tstamp; u16 q_counter; u16 drop_rq_q_counter; + struct notifier_block events_nb; + #ifdef CONFIG_MLX5_CORE_EN_DCB struct mlx5e_dcbx dcbx; #endif @@ -769,6 +802,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct mlx5e_wqe_frag_info *wi, u32 cqe_bcnt); void mlx5e_update_stats(struct mlx5e_priv *priv); +void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats); void mlx5e_init_l2_addr(struct mlx5e_priv *priv); int mlx5e_self_test_num(struct mlx5e_priv *priv); @@ -799,9 +833,11 @@ struct mlx5e_redirect_rqt_param { int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, struct mlx5e_redirect_rqt_param rrp); -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, - enum mlx5e_traffic_types tt, +void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, + const struct mlx5e_tirc_config *ttconfig, void *tirc, bool inner); +void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen); +struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt); int mlx5e_open_locked(struct net_device *netdev); int mlx5e_close_locked(struct net_device *netdev); @@ -931,14 +967,16 @@ int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc, void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn); int mlx5e_create_tises(struct mlx5e_priv *priv); -void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv); +void mlx5e_update_carrier(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); +void mlx5e_update_ndo_stats(struct mlx5e_priv *priv); void mlx5e_queue_update_stats(struct mlx5e_priv *priv); int mlx5e_bits_invert(unsigned long a, int size); typedef int (*change_hw_mtu_cb)(struct mlx5e_priv *priv); +int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv); int mlx5e_change_mtu(struct net_device *netdev, int new_mtu, change_hw_mtu_cb set_mtu_cb); @@ -962,12 +1000,20 @@ int mlx5e_ethtool_get_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal); int mlx5e_ethtool_set_coalesce(struct mlx5e_priv *priv, struct ethtool_coalesce *coal); +int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + struct ethtool_link_ksettings *link_ksettings); +int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, + const struct ethtool_link_ksettings *link_ksettings); u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv); u32 mlx5e_ethtool_get_rxfh_indir_size(struct mlx5e_priv *priv); int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info); int mlx5e_ethtool_flash_device(struct mlx5e_priv *priv, struct ethtool_flash *flash); +void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, + struct ethtool_pauseparam *pauseparam); +int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, + struct ethtool_pauseparam *pauseparam); /* mlx5e generic netdev management API */ int mlx5e_netdev_init(struct net_device *netdev, @@ -983,12 +1029,26 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv); void mlx5e_detach_netdev(struct mlx5e_priv *priv); void mlx5e_destroy_netdev(struct mlx5e_priv *priv); void 
mlx5e_build_nic_params(struct mlx5_core_dev *mdev, + struct mlx5e_rss_params *rss_params, struct mlx5e_params *params, u16 max_channels, u16 mtu); void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params); -void mlx5e_build_rss_params(struct mlx5e_params *params); +void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, + u16 num_channels); u8 mlx5e_params_calculate_tx_min_inline(struct mlx5_core_dev *mdev); void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); + +void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); +void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti); +netdev_features_t mlx5e_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features); +#ifdef CONFIG_MLX5_ESWITCH +int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac); +int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, int max_tx_rate); +int mlx5e_get_vf_config(struct net_device *dev, int vf, struct ifla_vf_info *ivi); +int mlx5e_get_vf_stats(struct net_device *dev, int vf, struct ifla_vf_stats *vf_stats); +#endif #endif /* __MLX5_EN_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h index 1431232c9a09..be5961ff24cc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/fs.h @@ -73,6 +73,22 @@ enum mlx5e_traffic_types { MLX5E_NUM_INDIR_TIRS = MLX5E_TT_ANY, }; +struct mlx5e_tirc_config { + u8 l3_prot_type; + u8 l4_prot_type; + u32 rx_hash_fields; +}; + +#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP) +#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_L4_SPORT |\ + MLX5_HASH_FIELD_SEL_L4_DPORT) +#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ + MLX5_HASH_FIELD_SEL_DST_IP |\ + MLX5_HASH_FIELD_SEL_IPSEC_SPI) + enum mlx5e_tunnel_types { MLX5E_TT_IPV4_GRE, MLX5E_TT_IPV6_GRE, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c new file mode 100644 index 000000000000..2ce420851e77 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c @@ -0,0 +1,169 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (c) 2018 Mellanox Technologies. 
*/ + +#include "en.h" +#include "monitor_stats.h" +#include "lib/eq.h" + +/* Driver will set the following watch counters list: + * Ppcnt.802_3: + * a_in_range_length_errors Type: 0x0, Counter: 0x0, group_id = N/A + * a_out_of_range_length_field Type: 0x0, Counter: 0x1, group_id = N/A + * a_frame_too_long_errors Type: 0x0, Counter: 0x2, group_id = N/A + * a_frame_check_sequence_errors Type: 0x0, Counter: 0x3, group_id = N/A + * a_alignment_errors Type: 0x0, Counter: 0x4, group_id = N/A + * if_out_discards Type: 0x0, Counter: 0x5, group_id = N/A + * Q_Counters: + * Q[index].rx_out_of_buffer Type: 0x1, Counter: 0x4, group_id = counter_ix + */ + +#define NUM_REQ_PPCNT_COUNTER_S1 MLX5_CMD_SET_MONITOR_NUM_PPCNT_COUNTER_SET1 +#define NUM_REQ_Q_COUNTERS_S1 MLX5_CMD_SET_MONITOR_NUM_Q_COUNTERS_SET1 + +int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; + + if (!MLX5_CAP_GEN(mdev, max_num_of_monitor_counters)) + return false; + if (MLX5_CAP_PCAM_REG(mdev, ppcnt) && + MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters) < + NUM_REQ_PPCNT_COUNTER_S1) + return false; + if (MLX5_CAP_GEN(mdev, num_q_monitor_counters) < + NUM_REQ_Q_COUNTERS_S1) + return false; + return true; +} + +void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv) +{ + u32 in[MLX5_ST_SZ_DW(arm_monitor_counter_in)] = {}; + u32 out[MLX5_ST_SZ_DW(arm_monitor_counter_out)] = {}; + + MLX5_SET(arm_monitor_counter_in, in, opcode, + MLX5_CMD_OP_ARM_MONITOR_COUNTER); + mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); +} + +static void mlx5e_monitor_counters_work(struct work_struct *work) +{ + struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, + monitor_counters_work); + + mutex_lock(&priv->state_lock); + mlx5e_update_ndo_stats(priv); + mutex_unlock(&priv->state_lock); + mlx5e_monitor_counter_arm(priv); +} + +static int mlx5e_monitor_event_handler(struct notifier_block *nb, + unsigned long event, void *eqe) +{ + struct mlx5e_priv *priv = mlx5_nb_cof(nb, struct mlx5e_priv, + monitor_counters_nb); + queue_work(priv->wq, &priv->monitor_counters_work); + return NOTIFY_OK; +} + +void mlx5e_monitor_counter_start(struct mlx5e_priv *priv) +{ + MLX5_NB_INIT(&priv->monitor_counters_nb, mlx5e_monitor_event_handler, + MONITOR_COUNTER); + mlx5_eq_notifier_register(priv->mdev, &priv->monitor_counters_nb); +} + +static void mlx5e_monitor_counter_stop(struct mlx5e_priv *priv) +{ + mlx5_eq_notifier_unregister(priv->mdev, &priv->monitor_counters_nb); + cancel_work_sync(&priv->monitor_counters_work); +} + +static int fill_monitor_counter_ppcnt_set1(int cnt, u32 *in) +{ + enum mlx5_monitor_counter_ppcnt ppcnt_cnt; + + for (ppcnt_cnt = 0; + ppcnt_cnt < NUM_REQ_PPCNT_COUNTER_S1; + ppcnt_cnt++, cnt++) { + MLX5_SET(set_monitor_counter_in, in, + monitor_counter[cnt].type, + MLX5_QUERY_MONITOR_CNT_TYPE_PPCNT); + MLX5_SET(set_monitor_counter_in, in, + monitor_counter[cnt].counter, + ppcnt_cnt); + } + return ppcnt_cnt; +} + +static int fill_monitor_counter_q_counter_set1(int cnt, int q_counter, u32 *in) +{ + MLX5_SET(set_monitor_counter_in, in, + monitor_counter[cnt].type, + MLX5_QUERY_MONITOR_CNT_TYPE_Q_COUNTER); + MLX5_SET(set_monitor_counter_in, in, + monitor_counter[cnt].counter, + MLX5_QUERY_MONITOR_Q_COUNTER_RX_OUT_OF_BUFFER); + MLX5_SET(set_monitor_counter_in, in, + monitor_counter[cnt].counter_group_id, + q_counter); + return 1; +} + +/* check if mlx5e_monitor_counter_supported before calling this function*/ +static void mlx5e_set_monitor_counter(struct mlx5e_priv *priv) +{ + struct 
mlx5_core_dev *mdev = priv->mdev; + int max_num_of_counters = MLX5_CAP_GEN(mdev, max_num_of_monitor_counters); + int num_q_counters = MLX5_CAP_GEN(mdev, num_q_monitor_counters); + int num_ppcnt_counters = !MLX5_CAP_PCAM_REG(mdev, ppcnt) ? 0 : + MLX5_CAP_GEN(mdev, num_ppcnt_monitor_counters); + u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {}; + u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {}; + int q_counter = priv->q_counter; + int cnt = 0; + + if (num_ppcnt_counters >= NUM_REQ_PPCNT_COUNTER_S1 && + max_num_of_counters >= (NUM_REQ_PPCNT_COUNTER_S1 + cnt)) + cnt += fill_monitor_counter_ppcnt_set1(cnt, in); + + if (num_q_counters >= NUM_REQ_Q_COUNTERS_S1 && + max_num_of_counters >= (NUM_REQ_Q_COUNTERS_S1 + cnt) && + q_counter) + cnt += fill_monitor_counter_q_counter_set1(cnt, q_counter, in); + + MLX5_SET(set_monitor_counter_in, in, num_of_counters, cnt); + MLX5_SET(set_monitor_counter_in, in, opcode, + MLX5_CMD_OP_SET_MONITOR_COUNTER); + + mlx5_cmd_exec(mdev, in, sizeof(in), out, sizeof(out)); +} + +/* check if mlx5e_monitor_counter_supported before calling this function*/ +void mlx5e_monitor_counter_init(struct mlx5e_priv *priv) +{ + INIT_WORK(&priv->monitor_counters_work, mlx5e_monitor_counters_work); + mlx5e_monitor_counter_start(priv); + mlx5e_set_monitor_counter(priv); + mlx5e_monitor_counter_arm(priv); + queue_work(priv->wq, &priv->update_stats_work); +} + +static void mlx5e_monitor_counter_disable(struct mlx5e_priv *priv) +{ + u32 in[MLX5_ST_SZ_DW(set_monitor_counter_in)] = {}; + u32 out[MLX5_ST_SZ_DW(set_monitor_counter_out)] = {}; + + MLX5_SET(set_monitor_counter_in, in, num_of_counters, 0); + MLX5_SET(set_monitor_counter_in, in, opcode, + MLX5_CMD_OP_SET_MONITOR_COUNTER); + + mlx5_cmd_exec(priv->mdev, in, sizeof(in), out, sizeof(out)); +} + +/* check if mlx5e_monitor_counter_supported before calling this function*/ +void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv) +{ + mlx5e_monitor_counter_disable(priv); + mlx5e_monitor_counter_stop(priv); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h new file mode 100644 index 000000000000..e1ac4b3d22fb --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies. */ + +#ifndef __MLX5_MONITOR_H__ +#define __MLX5_MONITOR_H__ + +int mlx5e_monitor_counter_supported(struct mlx5e_priv *priv); +void mlx5e_monitor_counter_init(struct mlx5e_priv *priv); +void mlx5e_monitor_counter_cleanup(struct mlx5e_priv *priv); +void mlx5e_monitor_counter_arm(struct mlx5e_priv *priv); + +#endif /* __MLX5_MONITOR_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c new file mode 100644 index 000000000000..046948ead152 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.c @@ -0,0 +1,634 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies. 
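The monitor-counter code above follows a set/arm/notify/re-arm cycle: the driver programs the watch list once, arms the device, and each MONITOR_COUNTER EQE queues work that refreshes the stats and re-arms. A compact sketch of that control flow, assuming the one-shot arming semantics implied by the re-arm in mlx5e_monitor_counters_work() (plain C, with a direct call standing in for the EQ and workqueue):

#include <stdbool.h>
#include <stdio.h>

static bool armed;

static void demo_arm(void)              { armed = true; }
static void demo_update_stats(void)     { puts("stats refreshed"); }

/* what the EQ would trigger on a MONITOR_COUNTER event */
static void demo_monitor_event(void)
{
        if (!armed)
                return;         /* the device only fires while armed */
        armed = false;          /* one-shot: the event consumes the arm */
        demo_update_stats();
        demo_arm();             /* re-arm for the next counter change */
}

int main(void)
{
        demo_arm();             /* init: set counters, then arm once */
        demo_monitor_event();   /* two simulated counter-change events */
        demo_monitor_event();
        return 0;
}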
*/ + +#include <net/vxlan.h> +#include <net/gre.h> +#include "lib/vxlan.h" +#include "en/tc_tun.h" + +static int get_route_and_out_devs(struct mlx5e_priv *priv, + struct net_device *dev, + struct net_device **route_dev, + struct net_device **out_dev) +{ + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *uplink_dev, *uplink_upper; + bool dst_is_lag_dev; + + uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + uplink_upper = netdev_master_upper_dev_get(uplink_dev); + dst_is_lag_dev = (uplink_upper && + netif_is_lag_master(uplink_upper) && + dev == uplink_upper && + mlx5_lag_is_sriov(priv->mdev)); + + /* if the egress device isn't on the same HW e-switch or + * it's a LAG device, use the uplink + */ + if (!switchdev_port_same_parent_id(priv->netdev, dev) || + dst_is_lag_dev) { + *route_dev = uplink_dev; + *out_dev = *route_dev; + } else { + *route_dev = dev; + if (is_vlan_dev(*route_dev)) + *out_dev = uplink_dev; + else if (mlx5e_eswitch_rep(dev)) + *out_dev = *route_dev; + else + return -EOPNOTSUPP; + } + + return 0; +} + +static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct net_device **route_dev, + struct flowi4 *fl4, + struct neighbour **out_n, + u8 *out_ttl) +{ + struct rtable *rt; + struct neighbour *n = NULL; + +#if IS_ENABLED(CONFIG_INET) + int ret; + + rt = ip_route_output_key(dev_net(mirred_dev), fl4); + ret = PTR_ERR_OR_ZERO(rt); + if (ret) + return ret; +#else + return -EOPNOTSUPP; +#endif + + ret = get_route_and_out_devs(priv, rt->dst.dev, route_dev, out_dev); + if (ret < 0) + return ret; + + if (!(*out_ttl)) + *out_ttl = ip4_dst_hoplimit(&rt->dst); + n = dst_neigh_lookup(&rt->dst, &fl4->daddr); + ip_rt_put(rt); + if (!n) + return -ENOMEM; + + *out_n = n; + return 0; +} + +static const char *mlx5e_netdev_kind(struct net_device *dev) +{ + if (dev->rtnl_link_ops) + return dev->rtnl_link_ops->kind; + else + return ""; +} + +static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct net_device **out_dev, + struct net_device **route_dev, + struct flowi6 *fl6, + struct neighbour **out_n, + u8 *out_ttl) +{ + struct neighbour *n = NULL; + struct dst_entry *dst; + +#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) + int ret; + + ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, + fl6); + if (ret < 0) + return ret; + + if (!(*out_ttl)) + *out_ttl = ip6_dst_hoplimit(dst); + + ret = get_route_and_out_devs(priv, dst->dev, route_dev, out_dev); + if (ret < 0) + return ret; +#else + return -EOPNOTSUPP; +#endif + + n = dst_neigh_lookup(dst, &fl6->daddr); + dst_release(dst); + if (!n) + return -ENOMEM; + + *out_n = n; + return 0; +} + +static int mlx5e_gen_vxlan_header(char buf[], struct ip_tunnel_key *tun_key) +{ + __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id); + struct udphdr *udp = (struct udphdr *)(buf); + struct vxlanhdr *vxh = (struct vxlanhdr *) + ((char *)udp + sizeof(struct udphdr)); + + udp->dest = tun_key->tp_dst; + vxh->vx_flags = VXLAN_HF_VNI; + vxh->vx_vni = vxlan_vni_field(tun_id); + + return 0; +} + +static int mlx5e_gen_gre_header(char buf[], struct ip_tunnel_key *tun_key) +{ + __be32 tun_id = tunnel_id_to_key32(tun_key->tun_id); + int hdr_len; + struct gre_base_hdr *greh = (struct gre_base_hdr *)(buf); + + /* the HW does not calculate GRE csum or sequences */ + if (tun_key->tun_flags & (TUNNEL_CSUM | TUNNEL_SEQ)) + return -EOPNOTSUPP; + + greh->protocol = htons(ETH_P_TEB); + + /* GRE key */ + 
hdr_len = gre_calc_hlen(tun_key->tun_flags); + greh->flags = gre_tnl_flags_to_gre_flags(tun_key->tun_flags); + if (tun_key->tun_flags & TUNNEL_KEY) { + __be32 *ptr = (__be32 *)(((u8 *)greh) + hdr_len - 4); + + *ptr = tun_id; + } + + return 0; +} + +static int mlx5e_gen_ip_tunnel_header(char buf[], __u8 *ip_proto, + struct mlx5e_encap_entry *e) +{ + int err = 0; + struct ip_tunnel_key *key = &e->tun_info.key; + + if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + *ip_proto = IPPROTO_UDP; + err = mlx5e_gen_vxlan_header(buf, key); + } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { + *ip_proto = IPPROTO_GRE; + err = mlx5e_gen_gre_header(buf, key); + } else { + pr_warn("mlx5: Cannot generate tunnel header for tunnel type (%d)\n" + , e->tunnel_type); + err = -EOPNOTSUPP; + } + + return err; +} + +static char *gen_eth_tnl_hdr(char *buf, struct net_device *dev, + struct mlx5e_encap_entry *e, + u16 proto) +{ + struct ethhdr *eth = (struct ethhdr *)buf; + char *ip; + + ether_addr_copy(eth->h_dest, e->h_dest); + ether_addr_copy(eth->h_source, dev->dev_addr); + if (is_vlan_dev(dev)) { + struct vlan_hdr *vlan = (struct vlan_hdr *) + ((char *)eth + ETH_HLEN); + ip = (char *)vlan + VLAN_HLEN; + eth->h_proto = vlan_dev_vlan_proto(dev); + vlan->h_vlan_TCI = htons(vlan_dev_vlan_id(dev)); + vlan->h_vlan_encapsulated_proto = htons(proto); + } else { + eth->h_proto = htons(proto); + ip = (char *)eth + ETH_HLEN; + } + + return ip; +} + +int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + struct net_device *out_dev, *route_dev; + struct neighbour *n = NULL; + struct flowi4 fl4 = {}; + int ipv4_encap_size; + char *encap_header; + u8 nud_state, ttl; + struct iphdr *ip; + int err; + + /* add the IP fields */ + fl4.flowi4_tos = tun_key->tos; + fl4.daddr = tun_key->u.ipv4.dst; + fl4.saddr = tun_key->u.ipv4.src; + ttl = tun_key->ttl; + + err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, &route_dev, + &fl4, &n, &ttl); + if (err) + return err; + + ipv4_encap_size = + (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + + sizeof(struct iphdr) + + e->tunnel_hlen; + + if (max_encap_size < ipv4_encap_size) { + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", + ipv4_encap_size, max_encap_size); + return -EOPNOTSUPP; + } + + encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + /* used by mlx5e_detach_encap to lookup a neigh hash table + * entry in the neigh hash table when a user deletes a rule + */ + e->m_neigh.dev = n->dev; + e->m_neigh.family = n->ops->family; + memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); + e->out_dev = out_dev;
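The ipv4 path above lays the whole encapsulation header out back to back in one flat buffer: Ethernet (plus an optional VLAN tag), then IPv4, then the tunnel header, and it rejects the rule when the total exceeds the HCA's max_encap_header_size. A self-contained sketch of that size arithmetic for the untagged VXLAN case (the DEMO_* constants are illustrative stand-ins for ETH_HLEN, sizeof(struct iphdr) and the driver's VXLAN_HLEN):

#include <stdio.h>

#define DEMO_ETH_HLEN   14
#define DEMO_IPV4_HLEN  20
#define DEMO_UDP_HLEN   8
#define DEMO_VXLAN_HLEN 8       /* flags + VNI */

int main(void)
{
        /* e->tunnel_hlen for VXLAN covers both UDP and VXLAN headers */
        int tunnel_hlen = DEMO_UDP_HLEN + DEMO_VXLAN_HLEN;
        int encap_size = DEMO_ETH_HLEN + DEMO_IPV4_HLEN + tunnel_hlen;

        printf("encap header: %d bytes (eth %d + ip %d + tunnel %d)\n",
               encap_size, DEMO_ETH_HLEN, DEMO_IPV4_HLEN, tunnel_hlen);
        return 0;
}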
+ + /* It's important to add the neigh to the hash table before checking + * the neigh validity state. So if we get a notification when the + * neigh changes its validity state, we will find the relevant neigh + * in the hash. + */ + err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + if (err) + goto free_encap; + + read_lock_bh(&n->lock); + nud_state = n->nud_state; + ether_addr_copy(e->h_dest, n->ha); + read_unlock_bh(&n->lock); + + /* add ethernet header */ + ip = (struct iphdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, + ETH_P_IP); + + /* add ip header */ + ip->tos = tun_key->tos; + ip->version = 0x4; + ip->ihl = 0x5; + ip->ttl = ttl; + ip->daddr = fl4.daddr; + ip->saddr = fl4.saddr; + + /* add tunneling protocol header */ + err = mlx5e_gen_ip_tunnel_header((char *)ip + sizeof(struct iphdr), + &ip->protocol, e); + if (err) + goto destroy_neigh_entry; + + e->encap_size = ipv4_encap_size; + e->encap_header = encap_header; + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); + err = -EAGAIN; + goto out; + } + + err = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv4_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB, + &e->encap_id); + if (err) + goto destroy_neigh_entry; + + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); + neigh_release(n); + return err; + +destroy_neigh_entry: + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); +free_encap: + kfree(encap_header); +out: + if (n) + neigh_release(n); + return err; +} + +int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e) +{ + int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); + struct ip_tunnel_key *tun_key = &e->tun_info.key; + struct net_device *out_dev, *route_dev; + struct neighbour *n = NULL; + struct flowi6 fl6 = {}; + struct ipv6hdr *ip6h; + int ipv6_encap_size; + char *encap_header; + u8 nud_state, ttl; + int err; + + ttl = tun_key->ttl; + + fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); + fl6.daddr = tun_key->u.ipv6.dst; + fl6.saddr = tun_key->u.ipv6.src; + + err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, &route_dev, + &fl6, &n, &ttl); + if (err) + return err; + + ipv6_encap_size = + (is_vlan_dev(route_dev) ? VLAN_ETH_HLEN : ETH_HLEN) + + sizeof(struct ipv6hdr) + + e->tunnel_hlen; + + if (max_encap_size < ipv6_encap_size) { + mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", + ipv6_encap_size, max_encap_size); + return -EOPNOTSUPP; + } + + encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); + if (!encap_header) + return -ENOMEM; + + /* used by mlx5e_detach_encap to lookup a neigh hash table + * entry in the neigh hash table when a user deletes a rule + */ + e->m_neigh.dev = n->dev; + e->m_neigh.family = n->ops->family; + memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); + e->out_dev = out_dev; + + /* It's important to add the neigh to the hash table before checking + * the neigh validity state. So if we get a notification when the + * neigh changes its validity state, we will find the relevant neigh + * in the hash.
+ */ + err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); + if (err) + goto free_encap; + + read_lock_bh(&n->lock); + nud_state = n->nud_state; + ether_addr_copy(e->h_dest, n->ha); + read_unlock_bh(&n->lock); + + /* add ethernet header */ + ip6h = (struct ipv6hdr *)gen_eth_tnl_hdr(encap_header, route_dev, e, + ETH_P_IPV6); + + /* add ip header */ + ip6_flow_hdr(ip6h, tun_key->tos, 0); + /* the HW fills up ipv6 payload len */ + ip6h->hop_limit = ttl; + ip6h->daddr = fl6.daddr; + ip6h->saddr = fl6.saddr; + + /* add tunneling protocol header */ + err = mlx5e_gen_ip_tunnel_header((char *)ip6h + sizeof(struct ipv6hdr), + &ip6h->nexthdr, e); + if (err) + goto destroy_neigh_entry; + + e->encap_size = ipv6_encap_size; + e->encap_header = encap_header; + + if (!(nud_state & NUD_VALID)) { + neigh_event_send(n, NULL); + err = -EAGAIN; + goto out; + } + + err = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, + ipv6_encap_size, encap_header, + MLX5_FLOW_NAMESPACE_FDB, + &e->encap_id); + if (err) + goto destroy_neigh_entry; + + e->flags |= MLX5_ENCAP_ENTRY_VALID; + mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); + neigh_release(n); + return err; + +destroy_neigh_entry: + mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); +free_encap: + kfree(encap_header); +out: + if (n) + neigh_release(n); + return err; +} + +int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev) +{ + if (netif_is_vxlan(tunnel_dev)) + return MLX5E_TC_TUNNEL_TYPE_VXLAN; + else if (netif_is_gretap(tunnel_dev) || + netif_is_ip6gretap(tunnel_dev)) + return MLX5E_TC_TUNNEL_TYPE_GRETAP; + else + return MLX5E_TC_TUNNEL_TYPE_UNKNOWN; +} + +bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, + struct net_device *netdev) +{ + int tunnel_type = mlx5e_tc_tun_get_type(netdev); + + if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN && + MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) + return true; + else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP && + MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) + return true; + else + return false; +} + +int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev, + struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack) +{ + e->tunnel_type = mlx5e_tc_tun_get_type(tunnel_dev); + + if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + int dst_port = be16_to_cpu(e->tun_info.key.tp_dst); + + if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, dst_port)) { + NL_SET_ERR_MSG_MOD(extack, + "vxlan udp dport was not registered with the HW"); + netdev_warn(priv->netdev, + "%d isn't an offloaded vxlan udp dport\n", + dst_port); + return -EOPNOTSUPP; + } + e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN; + e->tunnel_hlen = VXLAN_HLEN; + } else if (e->tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { + e->reformat_type = MLX5_REFORMAT_TYPE_L2_TO_NVGRE; + e->tunnel_hlen = gre_calc_hlen(e->tun_info.key.tun_flags); + } else { + e->reformat_type = -1; + e->tunnel_hlen = -1; + return -EOPNOTSUPP; + } + return 0; +} + +static int mlx5e_tc_tun_parse_vxlan(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f, + void *headers_c, + void *headers_v) +{ + struct netlink_ext_ack *extack = f->common.extack; + struct flow_dissector_key_ports *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_PORTS, + f->key); + struct flow_dissector_key_ports *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_PORTS, + f->mask); + void *misc_c = MLX5_ADDR_OF(fte_match_param, + spec->match_criteria, + 
misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, + spec->match_value, + misc_parameters); + + /* Full udp dst port must be given */ + if (!dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS) || + memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) { + NL_SET_ERR_MSG_MOD(extack, + "VXLAN decap filter must include enc_dst_port condition"); + netdev_warn(priv->netdev, + "VXLAN decap filter must include enc_dst_port condition\n"); + return -EOPNOTSUPP; + } + + /* udp dst port must be known as a VXLAN port */ + if (!mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst))) { + NL_SET_ERR_MSG_MOD(extack, + "Matched UDP port is not registered as a VXLAN port"); + netdev_warn(priv->netdev, + "UDP port %d is not registered as a VXLAN port\n", + be16_to_cpu(key->dst)); + return -EOPNOTSUPP; + } + + /* dst UDP port is valid here */ + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_dport, ntohs(mask->dst)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_dport, ntohs(key->dst)); + + MLX5_SET(fte_match_set_lyr_2_4, headers_c, udp_sport, ntohs(mask->src)); + MLX5_SET(fte_match_set_lyr_2_4, headers_v, udp_sport, ntohs(key->src)); + + /* match on VNI */ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *key = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->key); + struct flow_dissector_key_keyid *mask = + skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->mask); + MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, + be32_to_cpu(mask->keyid)); + MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, + be32_to_cpu(key->keyid)); + } + return 0; +} + +static int mlx5e_tc_tun_parse_gretap(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f, + void *outer_headers_c, + void *outer_headers_v) +{ + void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + + if (!MLX5_CAP_ESW(priv->mdev, nvgre_encap_decap)) { + NL_SET_ERR_MSG_MOD(f->common.extack, + "GRE HW offloading is not supported"); + netdev_warn(priv->netdev, "GRE HW offloading is not supported\n"); + return -EOPNOTSUPP; + } + + MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol); + MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, + ip_protocol, IPPROTO_GRE); + + /* gre protocol */ + MLX5_SET_TO_ONES(fte_match_set_misc, misc_c, gre_protocol); + MLX5_SET(fte_match_set_misc, misc_v, gre_protocol, ETH_P_TEB); + + /* gre key */ + if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { + struct flow_dissector_key_keyid *mask = NULL; + struct flow_dissector_key_keyid *key = NULL; + + mask = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->mask); + MLX5_SET(fte_match_set_misc, misc_c, + gre_key.key, be32_to_cpu(mask->keyid)); + + key = skb_flow_dissector_target(f->dissector, + FLOW_DISSECTOR_KEY_ENC_KEYID, + f->key); + MLX5_SET(fte_match_set_misc, misc_v, + gre_key.key, be32_to_cpu(key->keyid)); + } + + return 0; +} + +int mlx5e_tc_tun_parse(struct net_device *filter_dev, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f, + void *headers_c, + void *headers_v) +{ + int tunnel_type; + int err = 0; + + tunnel_type =
mlx5e_tc_tun_get_type(filter_dev); + if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_VXLAN) { + err = mlx5e_tc_tun_parse_vxlan(priv, spec, f, + headers_c, headers_v); + } else if (tunnel_type == MLX5E_TC_TUNNEL_TYPE_GRETAP) { + err = mlx5e_tc_tun_parse_gretap(priv, spec, f, + headers_c, headers_v); + } else { + netdev_warn(priv->netdev, + "decapsulation offload is not supported for %s net device (%d)\n", + mlx5e_netdev_kind(filter_dev), tunnel_type); + return -EOPNOTSUPP; + } + return err; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h new file mode 100644 index 000000000000..706ce7bf15e7 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_tun.h @@ -0,0 +1,44 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies. */ + +#ifndef __MLX5_EN_TC_TUNNEL_H__ +#define __MLX5_EN_TC_TUNNEL_H__ + +#include <linux/netdevice.h> +#include <linux/mlx5/fs.h> +#include <net/pkt_cls.h> +#include <linux/netlink.h> +#include "en.h" +#include "en_rep.h" + +enum { + MLX5E_TC_TUNNEL_TYPE_UNKNOWN, + MLX5E_TC_TUNNEL_TYPE_VXLAN, + MLX5E_TC_TUNNEL_TYPE_GRETAP +}; + +int mlx5e_tc_tun_init_encap_attr(struct net_device *tunnel_dev, + struct mlx5e_priv *priv, + struct mlx5e_encap_entry *e, + struct netlink_ext_ack *extack); + +int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); + +int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv, + struct net_device *mirred_dev, + struct mlx5e_encap_entry *e); + +int mlx5e_tc_tun_get_type(struct net_device *tunnel_dev); +bool mlx5e_tc_tun_device_to_offload(struct mlx5e_priv *priv, + struct net_device *netdev); + +int mlx5e_tc_tun_parse(struct net_device *filter_dev, + struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct tc_cls_flower_offload *f, + void *headers_c, + void *headers_v); + +#endif //__MLX5_EN_TC_TUNNEL_H__ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index ad6d471d00dd..3740177eed09 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -47,7 +47,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_dma_info *di, xdpi.xdpf->len, PCI_DMA_TODEVICE); xdpi.di = *di; - return mlx5e_xmit_xdp_frame(sq, &xdpi); + return sq->xmit_xdp_frame(sq, &xdpi); } /* returns true if packet was consumed by xdp */ @@ -102,7 +102,98 @@ xdp_abort: } } -bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) +static void mlx5e_xdp_mpwqe_session_start(struct mlx5e_xdpsq *sq) +{ + struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5_wq_cyc *wq = &sq->wq; + u8 wqebbs; + u16 pi; + + mlx5e_xdpsq_fetch_wqe(sq, &session->wqe); + + prefetchw(session->wqe->data); + session->ds_count = MLX5E_XDP_TX_EMPTY_DS_COUNT; + + pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + +/* The product of MLX5_SEND_WQE_MAX_WQEBBS * MLX5_SEND_WQEBB_NUM_DS + * (16 * 4 == 64) does not fit in the 6-bit DS field of Ctrl Segment. + * We use a bound lower than MLX5_SEND_WQE_MAX_WQEBBS to let a + * full-session WQE be cache-aligned.
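The bound discussed in the comment above exists because the ctrl segment encodes the data-segment count in a 6-bit field: 16 WQEBBs times 4 DS each would be 64, which does not fit. A short check of the arithmetic behind the cap (constants inlined for illustration):

#include <assert.h>

int main(void)
{
        int max_wqebbs = 16;    /* MLX5_SEND_WQE_MAX_WQEBBS */
        int ds_per_wqebb = 4;   /* MLX5_SEND_WQEBB_NUM_DS */

        /* 16 * 4 == 64 overflows the 6-bit DS field (max 63)... */
        assert(max_wqebbs * ds_per_wqebb > 63);
        /* ...so a session is capped at 15 WQEBBs (14 when L1 cache
         * lines are 128 bytes, keeping a full session cache-aligned). */
        assert((max_wqebbs - 1) * ds_per_wqebb <= 63);
        return 0;
}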
+ */ +#if L1_CACHE_BYTES < 128 +#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 1) +#else +#define MLX5E_XDP_MPW_MAX_WQEBBS (MLX5_SEND_WQE_MAX_WQEBBS - 2) +#endif + + wqebbs = min_t(u16, mlx5_wq_cyc_get_contig_wqebbs(wq, pi), + MLX5E_XDP_MPW_MAX_WQEBBS); + + session->max_ds_count = MLX5_SEND_WQEBB_NUM_DS * wqebbs; +} + +static void mlx5e_xdp_mpwqe_complete(struct mlx5e_xdpsq *sq) +{ + struct mlx5_wq_cyc *wq = &sq->wq; + struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5_wqe_ctrl_seg *cseg = &session->wqe->ctrl; + u16 ds_count = session->ds_count; + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[pi]; + + cseg->opmod_idx_opcode = + cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_ENHANCED_MPSW); + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_count); + + wi->num_wqebbs = DIV_ROUND_UP(ds_count, MLX5_SEND_WQEBB_NUM_DS); + wi->num_ds = ds_count - MLX5E_XDP_TX_EMPTY_DS_COUNT; + + sq->pc += wi->num_wqebbs; + + sq->doorbell_cseg = cseg; + + session->wqe = NULL; /* Close session */ +} + +static bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, + struct mlx5e_xdp_info *xdpi) +{ + struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5e_xdpsq_stats *stats = sq->stats; + + dma_addr_t dma_addr = xdpi->dma_addr; + struct xdp_frame *xdpf = xdpi->xdpf; + unsigned int dma_len = xdpf->len; + + if (unlikely(sq->hw_mtu < dma_len)) { + stats->err++; + return false; + } + + if (unlikely(!session->wqe)) { + if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, + MLX5_SEND_WQE_MAX_WQEBBS))) { + /* SQ is full, ring doorbell */ + mlx5e_xmit_xdp_doorbell(sq); + stats->full++; + return false; + } + + mlx5e_xdp_mpwqe_session_start(sq); + } + + mlx5e_xdp_mpwqe_add_dseg(sq, dma_addr, dma_len); + + if (unlikely(session->ds_count == session->max_ds_count)) + mlx5e_xdp_mpwqe_complete(sq); + + mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); + stats->xmit++; + return true; +} + +static bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) { struct mlx5_wq_cyc *wq = &sq->wq; u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); @@ -126,11 +217,8 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) } if (unlikely(!mlx5e_wqc_has_room_for(wq, sq->cc, sq->pc, 1))) { - if (sq->doorbell) { - /* SQ is full, ring doorbell */ - mlx5e_xmit_xdp_doorbell(sq); - sq->doorbell = false; - } + /* SQ is full, ring doorbell */ + mlx5e_xmit_xdp_doorbell(sq); stats->full++; return false; } @@ -152,23 +240,20 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi) cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND); - /* move page to reference to sq responsibility, - * and mark so it's not put back in page-cache. 
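/* [Editorial aside] mlx5e_xmit_xdp_frame_mpwqe() above amortizes one
 * control segment over many packets: a "session" opens on first use, one
 * data segment is appended per frame, and the WQE closes either when
 * max_ds_count is reached or when a doorbell is due. The open/append/close
 * skeleton, stripped of all mlx5 details (names invented here):
 */
#include <stdbool.h>

#define SESSION_MAX_DS 60	/* e.g. 15 WQEBBs * 4 DS per WQEBB */

struct session {
	bool open;
	unsigned int ds_count;
};

static void session_start(struct session *s)
{
	s->open = true;
	s->ds_count = 0;
}

static void session_complete(struct session *s)
{
	/* in the driver: write the accumulated DS count into the ctrl
	 * segment and remember it for the next doorbell ring */
	s->open = false;
}

static void session_add_frame(struct session *s)
{
	if (!s->open)
		session_start(s);

	s->ds_count++;			/* one data segment per frame */

	if (s->ds_count == SESSION_MAX_DS)
		session_complete(s);	/* WQE full: emit it eagerly */
}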
- */ - sq->db.xdpi[pi] = *xdpi; sq->pc++; - sq->doorbell = true; + sq->doorbell_cseg = cseg; + mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi); stats->xmit++; return true; } -bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq) { + struct mlx5e_xdp_info_fifo *xdpi_fifo; struct mlx5e_xdpsq *sq; struct mlx5_cqe64 *cqe; - struct mlx5e_rq *rq; bool is_redirect; u16 sqcc; int i; @@ -182,8 +267,8 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) if (!cqe) return false; - is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); - rq = container_of(sq, struct mlx5e_rq, xdpsq); + is_redirect = !rq; + xdpi_fifo = &sq->db.xdpi_fifo; /* sq->cc must be updated only after mlx5_cqwq_update_db_record(), * otherwise a cq overrun may occur @@ -199,20 +284,33 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) wqe_counter = be16_to_cpu(cqe->wqe_counter); + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) + netdev_WARN_ONCE(sq->channel->netdev, + "Bad OP in XDPSQ CQE: 0x%x\n", + get_cqe_opcode(cqe)); + do { - u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); - struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; + struct mlx5e_xdp_wqe_info *wi; + u16 ci, j; last_wqe = (sqcc == wqe_counter); - sqcc++; - - if (is_redirect) { - xdp_return_frame(xdpi->xdpf); - dma_unmap_single(sq->pdev, xdpi->dma_addr, - xdpi->xdpf->len, DMA_TO_DEVICE); - } else { - /* Recycle RX page */ - mlx5e_page_release(rq, &xdpi->di, true); + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc); + wi = &sq->db.wqe_info[ci]; + + sqcc += wi->num_wqebbs; + + for (j = 0; j < wi->num_ds; j++) { + struct mlx5e_xdp_info xdpi = + mlx5e_xdpi_fifo_pop(xdpi_fifo); + + if (is_redirect) { + xdp_return_frame(xdpi.xdpf); + dma_unmap_single(sq->pdev, xdpi.dma_addr, + xdpi.xdpf->len, DMA_TO_DEVICE); + } else { + /* Recycle RX page */ + mlx5e_page_release(rq, &xdpi.di, true); + } } } while (!last_wqe); } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); @@ -228,27 +326,32 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq) return (i == MLX5E_TX_CQ_POLL_BUDGET); } -void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq) +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) { - struct mlx5e_rq *rq; - bool is_redirect; - - is_redirect = test_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); - rq = is_redirect ? 
NULL : container_of(sq, struct mlx5e_rq, xdpsq); + struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; + bool is_redirect = !rq; while (sq->cc != sq->pc) { - u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); - struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci]; - - sq->cc++; - - if (is_redirect) { - xdp_return_frame(xdpi->xdpf); - dma_unmap_single(sq->pdev, xdpi->dma_addr, - xdpi->xdpf->len, DMA_TO_DEVICE); - } else { - /* Recycle RX page */ - mlx5e_page_release(rq, &xdpi->di, false); + struct mlx5e_xdp_wqe_info *wi; + u16 ci, i; + + ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc); + wi = &sq->db.wqe_info[ci]; + + sq->cc += wi->num_wqebbs; + + for (i = 0; i < wi->num_ds; i++) { + struct mlx5e_xdp_info xdpi = + mlx5e_xdpi_fifo_pop(xdpi_fifo); + + if (is_redirect) { + xdp_return_frame(xdpi.xdpf); + dma_unmap_single(sq->pdev, xdpi.dma_addr, + xdpi.xdpf->len, DMA_TO_DEVICE); + } else { + /* Recycle RX page */ + mlx5e_page_release(rq, &xdpi.di, false); + } } } } @@ -292,7 +395,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, xdpi.xdpf = xdpf; - if (unlikely(!mlx5e_xmit_xdp_frame(sq, &xdpi))) { + if (unlikely(!sq->xmit_xdp_frame(sq, &xdpi))) { dma_unmap_single(sq->pdev, xdpi.dma_addr, xdpf->len, DMA_TO_DEVICE); xdp_return_frame_rx_napi(xdpf); @@ -300,8 +403,33 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, } } - if (flags & XDP_XMIT_FLUSH) + if (flags & XDP_XMIT_FLUSH) { + if (sq->mpwqe.wqe) + mlx5e_xdp_mpwqe_complete(sq); mlx5e_xmit_xdp_doorbell(sq); + } return n - drops; } + +void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq) +{ + struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; + + if (xdpsq->mpwqe.wqe) + mlx5e_xdp_mpwqe_complete(xdpsq); + + mlx5e_xmit_xdp_doorbell(xdpsq); + + if (xdpsq->redirect_flush) { + xdp_do_flush_map(); + xdpsq->redirect_flush = false; + } +} + +void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw) +{ + sq->xmit_xdp_frame = is_mpw ? 
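/* [Editorial aside] The assignment being completed here is the whole
 * mechanism: the TX entry point is chosen once, when the SQ is
 * (re)configured, so the per-packet path pays a single indirect call
 * instead of re-testing the MPWQE flag for every frame. In miniature
 * (all names below are illustrative, not driver API):
 */
#include <stdbool.h>

typedef bool (*xmit_fn)(void *sq, void *frame);

static bool xmit_plain(void *sq, void *frame) { return true; }	/* stub */
static bool xmit_mpwqe(void *sq, void *frame) { return true; }	/* stub */

struct txq {
	xmit_fn xmit;	/* either the plain or the MPWQE routine */
};

static void txq_set_mode(struct txq *q, bool mpwqe)
{
	q->xmit = mpwqe ? xmit_mpwqe : xmit_plain;
}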
+ mlx5e_xmit_xdp_frame_mpwqe : mlx5e_xmit_xdp_frame; +} + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h index 6dfab045925f..3a67cb3cd179 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.h @@ -37,27 +37,62 @@ #define MLX5E_XDP_MAX_MTU ((int)(PAGE_SIZE - \ MLX5_SKB_FRAG_SZ(XDP_PACKET_HEADROOM))) #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) -#define MLX5E_XDP_TX_DS_COUNT \ - ((sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) + 1 /* SG DS */) +#define MLX5E_XDP_TX_EMPTY_DS_COUNT \ + (sizeof(struct mlx5e_tx_wqe) / MLX5_SEND_WQE_DS) +#define MLX5E_XDP_TX_DS_COUNT (MLX5E_XDP_TX_EMPTY_DS_COUNT + 1 /* SG DS */) bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, void *va, u16 *rx_headroom, u32 *len); -bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq); -void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq); - -bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi); +bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq); +void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq); +void mlx5e_set_xmit_fp(struct mlx5e_xdpsq *sq, bool is_mpw); +void mlx5e_xdp_rx_poll_complete(struct mlx5e_rq *rq); int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq) { + if (sq->doorbell_cseg) { + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg); + sq->doorbell_cseg = NULL; + } +} + +static inline void +mlx5e_xdp_mpwqe_add_dseg(struct mlx5e_xdpsq *sq, dma_addr_t dma_addr, u16 dma_len) +{ + struct mlx5e_xdp_mpwqe *session = &sq->mpwqe; + struct mlx5_wqe_data_seg *dseg = + (struct mlx5_wqe_data_seg *)session->wqe + session->ds_count++; + + dseg->addr = cpu_to_be64(dma_addr); + dseg->byte_count = cpu_to_be32(dma_len); + dseg->lkey = sq->mkey_be; +} + +static inline void mlx5e_xdpsq_fetch_wqe(struct mlx5e_xdpsq *sq, + struct mlx5e_tx_wqe **wqe) +{ struct mlx5_wq_cyc *wq = &sq->wq; - struct mlx5e_tx_wqe *wqe; - u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc - 1); /* last pi */ + u16 pi = mlx5_wq_cyc_ctr2ix(wq, sq->pc); + + *wqe = mlx5_wq_cyc_get_wqe(wq, pi); + memset(*wqe, 0, sizeof(**wqe)); +} - wqe = mlx5_wq_cyc_get_wqe(wq, pi); +static inline void +mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo, + struct mlx5e_xdp_info *xi) +{ + u32 i = (*fifo->pc)++ & fifo->mask; - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, &wqe->ctrl); + fifo->xi[i] = *xi; +} + +static inline struct mlx5e_xdp_info +mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo) +{ + return fifo->xi[(*fifo->cc)++ & fifo->mask]; } #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 128a82b1dbfc..53608afd39b6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -254,11 +254,13 @@ struct sk_buff *mlx5e_ipsec_handle_tx_skb(struct net_device *netdev, struct mlx5e_ipsec_metadata *mdata; struct mlx5e_ipsec_sa_entry *sa_entry; struct xfrm_state *x; + struct sec_path *sp; if (!xo) return skb; - if (unlikely(skb->sp->len != 1)) { + sp = skb_sec_path(skb); + if (unlikely(sp->len != 1)) { atomic64_inc(&priv->ipsec->sw_stats.ipsec_tx_drop_bundle); goto drop; } @@ -305,10 +307,11 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, struct mlx5e_priv *priv = 
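/* [Editorial aside] The xdpi FIFO defined just above is a classic
 * power-of-two ring: pc and cc are free-running counters and masking
 * replaces modulo, so push and pop are one store/load each. No emptiness
 * check is needed because completion handling never pops more entries
 * than were pushed for the WQEs being completed. Standalone sketch:
 */
#include <assert.h>

#define RING_SIZE 8			/* must be a power of two */

struct ring {
	int slots[RING_SIZE];
	unsigned int mask;		/* RING_SIZE - 1 */
	unsigned int pc, cc;		/* producer / consumer counters */
};

static void ring_push(struct ring *r, int v)
{
	r->slots[r->pc++ & r->mask] = v;	/* wrap-around for free */
}

static int ring_pop(struct ring *r)
{
	return r->slots[r->cc++ & r->mask];
}

int main(void)
{
	struct ring r = { .mask = RING_SIZE - 1 };

	ring_push(&r, 42);
	assert(ring_pop(&r) == 42);
	return 0;
}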
netdev_priv(netdev); struct xfrm_offload *xo; struct xfrm_state *xs; + struct sec_path *sp; u32 sa_handle; - skb->sp = secpath_dup(skb->sp); - if (unlikely(!skb->sp)) { + sp = secpath_set(skb); + if (unlikely(!sp)) { atomic64_inc(&priv->ipsec->sw_stats.ipsec_rx_drop_sp_alloc); return NULL; } @@ -320,8 +323,9 @@ mlx5e_ipsec_build_sp(struct net_device *netdev, struct sk_buff *skb, return NULL; } - skb->sp->xvec[skb->sp->len++] = xs; - skb->sp->olen++; + sp = skb_sec_path(skb); + sp->xvec[sp->len++] = xs; + sp->olen++; xo = xfrm_offload(skb); xo->flags = CRYPTO_DONE; @@ -372,10 +376,11 @@ struct sk_buff *mlx5e_ipsec_handle_rx_skb(struct net_device *netdev, bool mlx5e_ipsec_feature_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features) { + struct sec_path *sp = skb_sec_path(skb); struct xfrm_state *x; - if (skb->sp && skb->sp->len) { - x = skb->sp->xvec[0]; + if (sp && sp->len) { + x = sp->xvec[0]; if (x && x->xso.offload_handle) return true; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index f480763dcd0d..c9df08133718 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -135,14 +135,15 @@ void mlx5e_build_ptys2ethtool_map(void) ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT); } -static const char mlx5e_priv_flags[][ETH_GSTRING_LEN] = { - "rx_cqe_moder", - "tx_cqe_moder", - "rx_cqe_compress", - "rx_striding_rq", - "rx_no_csum_complete", +typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); + +struct pflag_desc { + char name[ETH_GSTRING_LEN]; + mlx5e_pflag_handler handler; }; +static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS]; + int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) { int i, num_stats = 0; @@ -153,7 +154,7 @@ int mlx5e_ethtool_get_sset_count(struct mlx5e_priv *priv, int sset) num_stats += mlx5e_stats_grps[i].get_num_stats(priv); return num_stats; case ETH_SS_PRIV_FLAGS: - return ARRAY_SIZE(mlx5e_priv_flags); + return MLX5E_NUM_PFLAGS; case ETH_SS_TEST: return mlx5e_self_test_num(priv); /* fallthrough */ @@ -183,8 +184,9 @@ void mlx5e_ethtool_get_strings(struct mlx5e_priv *priv, u32 stringset, u8 *data) switch (stringset) { case ETH_SS_PRIV_FLAGS: - for (i = 0; i < ARRAY_SIZE(mlx5e_priv_flags); i++) - strcpy(data + i * ETH_GSTRING_LEN, mlx5e_priv_flags[i]); + for (i = 0; i < MLX5E_NUM_PFLAGS; i++) + strcpy(data + i * ETH_GSTRING_LEN, + mlx5e_priv_flags[i].name); break; case ETH_SS_TEST: @@ -353,7 +355,7 @@ int mlx5e_ethtool_set_channels(struct mlx5e_priv *priv, new_channels.params = priv->channels.params; new_channels.params.num_channels = count; if (!netif_is_rxfh_configured(priv->netdev)) - mlx5e_build_default_indir_rqt(new_channels.params.indirection_rqt, + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, count); if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { @@ -785,10 +787,9 @@ static void get_lp_advertising(u32 eth_proto_lp, ptys2ethtool_adver_link(lp_advertising, eth_proto_lp); } -static int mlx5e_get_link_ksettings(struct net_device *netdev, - struct ethtool_link_ksettings *link_ksettings) +int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv, + struct ethtool_link_ksettings *link_ksettings) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0}; u32 rx_pause = 0; @@ -804,7 +805,7 @@ static int mlx5e_get_link_ksettings(struct 
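/* [Editorial aside] On the sec_path conversion in ipsec_rxtx.c above:
 * skb->sp is no longer dereferenced directly -- reads go through
 * skb_sec_path() and allocation through secpath_set(); note the re-fetch
 * via skb_sec_path() later in the same function. The offload-feature test
 * then reduces to one predicate; condensed form (kernel context assumed,
 * not a standalone program):
 */
static bool xfrm_offload_usable(struct sk_buff *skb)
{
	struct sec_path *sp = skb_sec_path(skb);

	return sp && sp->len && sp->xvec[0] &&
	       sp->xvec[0]->xso.offload_handle;
}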
net_device *netdev, err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN, 1); if (err) { - netdev_err(netdev, "%s: query port ptys failed: %d\n", + netdev_err(priv->netdev, "%s: query port ptys failed: %d\n", __func__, err); goto err_query_regs; } @@ -824,7 +825,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, get_supported(eth_proto_cap, link_ksettings); get_advertising(eth_proto_admin, tx_pause, rx_pause, link_ksettings); - get_speed_duplex(netdev, eth_proto_oper, link_ksettings); + get_speed_duplex(priv->netdev, eth_proto_oper, link_ksettings); eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap; @@ -844,7 +845,7 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev, Autoneg); if (get_fec_supported_advertised(mdev, link_ksettings)) - netdev_dbg(netdev, "%s: FEC caps query failed: %d\n", + netdev_dbg(priv->netdev, "%s: FEC caps query failed: %d\n", __func__, err); if (!an_disable_admin) @@ -855,6 +856,14 @@ err_query_regs: return err; } +static int mlx5e_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); +} + static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) { u32 i, ptys_modes = 0; @@ -869,10 +878,9 @@ static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes) return ptys_modes; } -static int mlx5e_set_link_ksettings(struct net_device *netdev, - const struct ethtool_link_ksettings *link_ksettings) +int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv, + const struct ethtool_link_ksettings *link_ksettings) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; u32 eth_proto_cap, eth_proto_admin; bool an_changes = false; @@ -892,14 +900,14 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev, err = mlx5_query_port_proto_cap(mdev, ð_proto_cap, MLX5_PTYS_EN); if (err) { - netdev_err(netdev, "%s: query port eth proto cap failed: %d\n", + netdev_err(priv->netdev, "%s: query port eth proto cap failed: %d\n", __func__, err); goto out; } link_modes = link_modes & eth_proto_cap; if (!link_modes) { - netdev_err(netdev, "%s: Not supported link mode(s) requested", + netdev_err(priv->netdev, "%s: Not supported link mode(s) requested", __func__); err = -EINVAL; goto out; @@ -907,7 +915,7 @@ static int mlx5e_set_link_ksettings(struct net_device *netdev, err = mlx5_query_port_proto_admin(mdev, ð_proto_admin, MLX5_PTYS_EN); if (err) { - netdev_err(netdev, "%s: query port eth proto admin failed: %d\n", + netdev_err(priv->netdev, "%s: query port eth proto admin failed: %d\n", __func__, err); goto out; } @@ -929,9 +937,17 @@ out: return err; } +static int mlx5e_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings); +} + u32 mlx5e_ethtool_get_rxfh_key_size(struct mlx5e_priv *priv) { - return sizeof(priv->channels.params.toeplitz_hash_key); + return sizeof(priv->rss_params.toeplitz_hash_key); } static u32 mlx5e_get_rxfh_key_size(struct net_device *netdev) @@ -957,50 +973,27 @@ static int mlx5e_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rss_params *rss = &priv->rss_params; if (indir) - memcpy(indir, priv->channels.params.indirection_rqt, - 
sizeof(priv->channels.params.indirection_rqt)); + memcpy(indir, rss->indirection_rqt, + sizeof(rss->indirection_rqt)); if (key) - memcpy(key, priv->channels.params.toeplitz_hash_key, - sizeof(priv->channels.params.toeplitz_hash_key)); + memcpy(key, rss->toeplitz_hash_key, + sizeof(rss->toeplitz_hash_key)); if (hfunc) - *hfunc = priv->channels.params.rss_hfunc; + *hfunc = rss->hfunc; return 0; } -static void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) -{ - void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); - struct mlx5_core_dev *mdev = priv->mdev; - int ctxlen = MLX5_ST_SZ_BYTES(tirc); - int tt; - - MLX5_SET(modify_tir_in, in, bitmask.hash, 1); - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(tirc, 0, ctxlen); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); - mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); - } - - if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) - return; - - for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { - memset(tirc, 0, ctxlen); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); - mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, inlen); - } -} - static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, const u8 *key, const u8 hfunc) { struct mlx5e_priv *priv = netdev_priv(dev); + struct mlx5e_rss_params *rss = &priv->rss_params; int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); bool hash_changed = false; void *in; @@ -1016,15 +1009,14 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, mutex_lock(&priv->state_lock); - if (hfunc != ETH_RSS_HASH_NO_CHANGE && - hfunc != priv->channels.params.rss_hfunc) { - priv->channels.params.rss_hfunc = hfunc; + if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != rss->hfunc) { + rss->hfunc = hfunc; hash_changed = true; } if (indir) { - memcpy(priv->channels.params.indirection_rqt, indir, - sizeof(priv->channels.params.indirection_rqt)); + memcpy(rss->indirection_rqt, indir, + sizeof(rss->indirection_rqt)); if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { u32 rqtn = priv->indir_rqt.rqtn; @@ -1032,7 +1024,7 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, .is_rss = true, { .rss = { - .hfunc = priv->channels.params.rss_hfunc, + .hfunc = rss->hfunc, .channels = &priv->channels, }, }, @@ -1043,10 +1035,9 @@ static int mlx5e_set_rxfh(struct net_device *dev, const u32 *indir, } if (key) { - memcpy(priv->channels.params.toeplitz_hash_key, key, - sizeof(priv->channels.params.toeplitz_hash_key)); - hash_changed = hash_changed || - priv->channels.params.rss_hfunc == ETH_RSS_HASH_TOP; + memcpy(rss->toeplitz_hash_key, key, + sizeof(rss->toeplitz_hash_key)); + hash_changed = hash_changed || rss->hfunc == ETH_RSS_HASH_TOP; } if (hash_changed) @@ -1150,25 +1141,31 @@ static int mlx5e_set_tunable(struct net_device *dev, return err; } -static void mlx5e_get_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pauseparam) +void mlx5e_ethtool_get_pauseparam(struct mlx5e_priv *priv, + struct ethtool_pauseparam *pauseparam) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; int err; err = mlx5_query_port_pause(mdev, &pauseparam->rx_pause, &pauseparam->tx_pause); if (err) { - netdev_err(netdev, "%s: mlx5_query_port_pause failed:0x%x\n", + netdev_err(priv->netdev, "%s: mlx5_query_port_pause failed:0x%x\n", __func__, err); } } -static int mlx5e_set_pauseparam(struct net_device *netdev, - struct ethtool_pauseparam *pauseparam) +static void 
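/* [Editorial aside] For orientation while reading the rxfh conversion
 * above: the series gathers the RSS state that used to live in
 * channels.params into a single structure hung off the priv. Its shape,
 * as implied by the accesses in this patch (the exact layout in en.h may
 * differ):
 */
struct mlx5e_rss_params {
	u32 indirection_rqt[MLX5E_INDIR_RQT_SIZE]; /* channel per RQT slot */
	u32 rx_hash_fields[MLX5E_NUM_INDIR_TIRS];  /* per traffic type */
	u8  toeplitz_hash_key[40];                 /* ETH_RSS_HASH_TOP key */
	u8  hfunc;                                 /* ETH_RSS_HASH_{TOP,XOR} */
};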
mlx5e_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_ethtool_get_pauseparam(priv, pauseparam); +} + +int mlx5e_ethtool_set_pauseparam(struct mlx5e_priv *priv, + struct ethtool_pauseparam *pauseparam) { - struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; int err; @@ -1179,13 +1176,21 @@ static int mlx5e_set_pauseparam(struct net_device *netdev, pauseparam->rx_pause ? 1 : 0, pauseparam->tx_pause ? 1 : 0); if (err) { - netdev_err(netdev, "%s: mlx5_set_port_pause failed:0x%x\n", + netdev_err(priv->netdev, "%s: mlx5_set_port_pause failed:0x%x\n", __func__, err); } return err; } +static int mlx5e_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_set_pauseparam(priv, pauseparam); +} + int mlx5e_ethtool_get_ts_info(struct mlx5e_priv *priv, struct ethtool_ts_info *info) { @@ -1505,8 +1510,6 @@ static int mlx5e_get_module_eeprom(struct net_device *netdev, return 0; } -typedef int (*mlx5e_pflag_handler)(struct net_device *netdev, bool enable); - static int set_pflag_cqe_based_moder(struct net_device *netdev, bool enable, bool is_rx_cq) { @@ -1669,23 +1672,58 @@ static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) return 0; } +static int set_pflag_xdp_tx_mpwqe(struct net_device *netdev, bool enable) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_channels new_channels = {}; + int err; + + if (enable && !MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)) + return -EOPNOTSUPP; + + new_channels.params = priv->channels.params; + + MLX5E_SET_PFLAG(&new_channels.params, MLX5E_PFLAG_XDP_TX_MPWQE, enable); + + if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) { + priv->channels.params = new_channels.params; + return 0; + } + + err = mlx5e_open_channels(priv, &new_channels); + if (err) + return err; + + mlx5e_switch_priv_channels(priv, &new_channels, NULL); + return 0; +} + +static const struct pflag_desc mlx5e_priv_flags[MLX5E_NUM_PFLAGS] = { + { "rx_cqe_moder", set_pflag_rx_cqe_based_moder }, + { "tx_cqe_moder", set_pflag_tx_cqe_based_moder }, + { "rx_cqe_compress", set_pflag_rx_cqe_compress }, + { "rx_striding_rq", set_pflag_rx_striding_rq }, + { "rx_no_csum_complete", set_pflag_rx_no_csum_complete }, + { "xdp_tx_mpwqe", set_pflag_xdp_tx_mpwqe }, +}; + static int mlx5e_handle_pflag(struct net_device *netdev, u32 wanted_flags, - enum mlx5e_priv_flag flag, - mlx5e_pflag_handler pflag_handler) + enum mlx5e_priv_flag flag) { struct mlx5e_priv *priv = netdev_priv(netdev); - bool enable = !!(wanted_flags & flag); + bool enable = !!(wanted_flags & BIT(flag)); u32 changes = wanted_flags ^ priv->channels.params.pflags; int err; - if (!(changes & flag)) + if (!(changes & BIT(flag))) return 0; - err = pflag_handler(netdev, enable); + err = mlx5e_priv_flags[flag].handler(netdev, enable); if (err) { - netdev_err(netdev, "%s private flag 0x%x failed err %d\n", - enable ? "Enable" : "Disable", flag, err); + netdev_err(netdev, "%s private flag '%s' failed err %d\n", + enable ? 
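/* [Editorial aside] The pflag_desc table and the loop below replace five
 * copy-pasted mlx5e_handle_pflag() calls; the flag's enum value doubles as
 * its bit position, so one loop covers every private flag and stops at the
 * first failure. The dispatch pattern in isolation (illustrative names):
 */
#include <stdbool.h>

typedef int (*flag_handler)(bool enable);

struct flag_desc {
	const char  *name;
	flag_handler handler;
};

static int apply_flag_changes(const struct flag_desc *table, int nflags,
			      unsigned int wanted, unsigned int *active)
{
	unsigned int changes = wanted ^ *active;
	int i, err;

	for (i = 0; i < nflags; i++) {
		if (!(changes & (1u << i)))
			continue;		/* bit unchanged: skip */
		err = table[i].handler(wanted & (1u << i));
		if (err)
			return err;		/* stop at first failure */
		*active ^= 1u << i;
	}
	return 0;
}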
"Enable" : "Disable", mlx5e_priv_flags[flag].name, err); return err; } @@ -1696,38 +1734,17 @@ static int mlx5e_handle_pflag(struct net_device *netdev, static int mlx5e_set_priv_flags(struct net_device *netdev, u32 pflags) { struct mlx5e_priv *priv = netdev_priv(netdev); + enum mlx5e_priv_flag pflag; int err; mutex_lock(&priv->state_lock); - err = mlx5e_handle_pflag(netdev, pflags, - MLX5E_PFLAG_RX_CQE_BASED_MODER, - set_pflag_rx_cqe_based_moder); - if (err) - goto out; - err = mlx5e_handle_pflag(netdev, pflags, - MLX5E_PFLAG_TX_CQE_BASED_MODER, - set_pflag_tx_cqe_based_moder); - if (err) - goto out; - - err = mlx5e_handle_pflag(netdev, pflags, - MLX5E_PFLAG_RX_CQE_COMPRESS, - set_pflag_rx_cqe_compress); - if (err) - goto out; - - err = mlx5e_handle_pflag(netdev, pflags, - MLX5E_PFLAG_RX_STRIDING_RQ, - set_pflag_rx_striding_rq); - if (err) - goto out; - - err = mlx5e_handle_pflag(netdev, pflags, - MLX5E_PFLAG_RX_NO_CSUM_COMPLETE, - set_pflag_rx_no_csum_complete); + for (pflag = 0; pflag < MLX5E_NUM_PFLAGS; pflag++) { + err = mlx5e_handle_pflag(netdev, pflags, pflag); + if (err) + break; + } -out: mutex_unlock(&priv->state_lock); /* Need to fix some features.. */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c index c18dcebe1462..4421c10f58ae 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c @@ -771,6 +771,112 @@ void mlx5e_ethtool_init_steering(struct mlx5e_priv *priv) INIT_LIST_HEAD(&priv->fs.ethtool.rules); } +static enum mlx5e_traffic_types flow_type_to_traffic_type(u32 flow_type) +{ + switch (flow_type) { + case TCP_V4_FLOW: + return MLX5E_TT_IPV4_TCP; + case TCP_V6_FLOW: + return MLX5E_TT_IPV6_TCP; + case UDP_V4_FLOW: + return MLX5E_TT_IPV4_UDP; + case UDP_V6_FLOW: + return MLX5E_TT_IPV6_UDP; + case AH_V4_FLOW: + return MLX5E_TT_IPV4_IPSEC_AH; + case AH_V6_FLOW: + return MLX5E_TT_IPV6_IPSEC_AH; + case ESP_V4_FLOW: + return MLX5E_TT_IPV4_IPSEC_ESP; + case ESP_V6_FLOW: + return MLX5E_TT_IPV6_IPSEC_ESP; + case IPV4_FLOW: + return MLX5E_TT_IPV4; + case IPV6_FLOW: + return MLX5E_TT_IPV6; + default: + return MLX5E_NUM_INDIR_TIRS; + } +} + +static int mlx5e_set_rss_hash_opt(struct mlx5e_priv *priv, + struct ethtool_rxnfc *nfc) +{ + int inlen = MLX5_ST_SZ_BYTES(modify_tir_in); + enum mlx5e_traffic_types tt; + u8 rx_hash_field = 0; + void *in; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt == MLX5E_NUM_INDIR_TIRS) + return -EINVAL; + + /* RSS does not support anything other than hashing to queues + * on src IP, dest IP, TCP/UDP src port and TCP/UDP dest + * port. 
+ */ + if (nfc->flow_type != TCP_V4_FLOW && + nfc->flow_type != TCP_V6_FLOW && + nfc->flow_type != UDP_V4_FLOW && + nfc->flow_type != UDP_V6_FLOW) + return -EOPNOTSUPP; + + if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST | + RXH_L4_B_0_1 | RXH_L4_B_2_3)) + return -EOPNOTSUPP; + + if (nfc->data & RXH_IP_SRC) + rx_hash_field |= MLX5_HASH_FIELD_SEL_SRC_IP; + if (nfc->data & RXH_IP_DST) + rx_hash_field |= MLX5_HASH_FIELD_SEL_DST_IP; + if (nfc->data & RXH_L4_B_0_1) + rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_SPORT; + if (nfc->data & RXH_L4_B_2_3) + rx_hash_field |= MLX5_HASH_FIELD_SEL_L4_DPORT; + + in = kvzalloc(inlen, GFP_KERNEL); + if (!in) + return -ENOMEM; + + mutex_lock(&priv->state_lock); + + if (rx_hash_field == priv->rss_params.rx_hash_fields[tt]) + goto out; + + priv->rss_params.rx_hash_fields[tt] = rx_hash_field; + mlx5e_modify_tirs_hash(priv, in, inlen); + +out: + mutex_unlock(&priv->state_lock); + kvfree(in); + return 0; +} + +static int mlx5e_get_rss_hash_opt(struct mlx5e_priv *priv, + struct ethtool_rxnfc *nfc) +{ + enum mlx5e_traffic_types tt; + u32 hash_field = 0; + + tt = flow_type_to_traffic_type(nfc->flow_type); + if (tt == MLX5E_NUM_INDIR_TIRS) + return -EINVAL; + + hash_field = priv->rss_params.rx_hash_fields[tt]; + nfc->data = 0; + + if (hash_field & MLX5_HASH_FIELD_SEL_SRC_IP) + nfc->data |= RXH_IP_SRC; + if (hash_field & MLX5_HASH_FIELD_SEL_DST_IP) + nfc->data |= RXH_IP_DST; + if (hash_field & MLX5_HASH_FIELD_SEL_L4_SPORT) + nfc->data |= RXH_L4_B_0_1; + if (hash_field & MLX5_HASH_FIELD_SEL_L4_DPORT) + nfc->data |= RXH_L4_B_2_3; + + return 0; +} + int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) { int err = 0; @@ -783,6 +889,9 @@ int mlx5e_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) case ETHTOOL_SRXCLSRLDEL: err = mlx5e_ethtool_flow_remove(priv, cmd->fs.location); break; + case ETHTOOL_SRXFH: + err = mlx5e_set_rss_hash_opt(priv, cmd); + break; default: err = -EOPNOTSUPP; break; @@ -810,6 +919,9 @@ int mlx5e_get_rxnfc(struct net_device *dev, case ETHTOOL_GRXCLSRLALL: err = mlx5e_ethtool_get_all_flows(priv, info, rule_locs); break; + case ETHTOOL_GRXFH: + err = mlx5e_get_rss_hash_opt(priv, info); + break; default: err = -EOPNOTSUPP; break; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index b70cb6fd164c..8cfd2ec7c0a2 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -49,6 +49,8 @@ #include "lib/clock.h" #include "en/port.h" #include "en/xdp.h" +#include "lib/eq.h" +#include "en/monitor_stats.h" struct mlx5e_rq_param { u32 rqc[MLX5_ST_SZ_DW(rqc)]; @@ -59,6 +61,7 @@ struct mlx5e_rq_param { struct mlx5e_sq_param { u32 sqc[MLX5_ST_SZ_DW(sqc)]; struct mlx5_wq_param wq; + bool is_mpw; }; struct mlx5e_cq_param { @@ -228,7 +231,7 @@ void mlx5e_set_rq_type(struct mlx5_core_dev *mdev, struct mlx5e_params *params) MLX5_WQ_TYPE_CYCLIC; } -static void mlx5e_update_carrier(struct mlx5e_priv *priv) +void mlx5e_update_carrier(struct mlx5e_priv *priv) { struct mlx5_core_dev *mdev = priv->mdev; u8 port_state; @@ -267,7 +270,7 @@ void mlx5e_update_stats(struct mlx5e_priv *priv) mlx5e_stats_grps[i].update_stats(priv); } -static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) +void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) { int i; @@ -298,33 +301,35 @@ void mlx5e_queue_update_stats(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->update_stats_work); } -static void mlx5e_async_event(struct mlx5_core_dev *mdev, 
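/* [Editorial aside] With the ETHTOOL_SRXFH/GRXFH cases wired up above, the
 * RSS hash tuple becomes runtime-configurable per flow type from
 * userspace; for example (interface name is a placeholder):
 *
 *	# hash UDPv4 on src/dst IP plus src/dst L4 port
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 *	# read the setting back
 *	ethtool -n eth0 rx-flow-hash udp4
 *
 * where s, d, f and n select RXH_IP_SRC, RXH_IP_DST, RXH_L4_B_0_1 and
 * RXH_L4_B_2_3 -- exactly the four bits mlx5e_set_rss_hash_opt() accepts.
 */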
void *vpriv, - enum mlx5_dev_event event, unsigned long param) +static int async_event(struct notifier_block *nb, unsigned long event, void *data) { - struct mlx5e_priv *priv = vpriv; + struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb); + struct mlx5_eqe *eqe = data; - if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state)) - return; + if (event != MLX5_EVENT_TYPE_PORT_CHANGE) + return NOTIFY_DONE; - switch (event) { - case MLX5_DEV_EVENT_PORT_UP: - case MLX5_DEV_EVENT_PORT_DOWN: + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: queue_work(priv->wq, &priv->update_carrier_work); break; default: - break; + return NOTIFY_DONE; } + + return NOTIFY_OK; } static void mlx5e_enable_async_events(struct mlx5e_priv *priv) { - set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state); + priv->events_nb.notifier_call = async_event; + mlx5_notifier_register(priv->mdev, &priv->events_nb); } static void mlx5e_disable_async_events(struct mlx5e_priv *priv) { - clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state); - synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC)); + mlx5_notifier_unregister(priv->mdev, &priv->events_nb); } static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq, @@ -988,18 +993,42 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq) static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq) { - kvfree(sq->db.xdpi); + kvfree(sq->db.xdpi_fifo.xi); + kvfree(sq->db.wqe_info); +} + +static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa) +{ + struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo; + int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS; + + xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq, + GFP_KERNEL, numa); + if (!xdpi_fifo->xi) + return -ENOMEM; + + xdpi_fifo->pc = &sq->xdpi_fifo_pc; + xdpi_fifo->cc = &sq->xdpi_fifo_cc; + xdpi_fifo->mask = dsegs_per_wq - 1; + + return 0; } static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa) { int wq_sz = mlx5_wq_cyc_get_size(&sq->wq); + int err; - sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)), - GFP_KERNEL, numa); - if (!sq->db.xdpi) { - mlx5e_free_xdpsq_db(sq); + sq->db.wqe_info = kvzalloc_node(sizeof(*sq->db.wqe_info) * wq_sz, + GFP_KERNEL, numa); + if (!sq->db.wqe_info) return -ENOMEM; + + err = mlx5e_alloc_xdpsq_fifo(sq, numa); + if (err) { + mlx5e_free_xdpsq_db(sq); + return err; } return 0; @@ -1558,11 +1587,8 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c, struct mlx5e_xdpsq *sq, bool is_redirect) { - unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; struct mlx5e_create_sq_param csp = {}; - unsigned int inline_hdr_sz = 0; int err; - int i; err = mlx5e_alloc_xdpsq(c, params, param, sq, is_redirect); if (err) @@ -1573,30 +1599,40 @@ static int mlx5e_open_xdpsq(struct mlx5e_channel *c, csp.cqn = sq->cq.mcq.cqn; csp.wq_ctrl = &sq->wq_ctrl; csp.min_inline_mode = sq->min_inline_mode; - if (is_redirect) - set_bit(MLX5E_SQ_STATE_REDIRECT, &sq->state); set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn); if (err) goto err_free_xdpsq; - if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - inline_hdr_sz = MLX5E_XDP_MIN_INLINE; - ds_cnt++; - } + mlx5e_set_xmit_fp(sq, param->is_mpw); - /* Pre initialize fixed WQE fields */ - for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { - struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); - struct mlx5_wqe_ctrl_seg *cseg = 
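/* [Editorial aside] The async_event() conversion above swaps the old mlx5
 * device-event callback for a standard notifier_block: the consumer
 * recovers its private structure with container_of() and returns
 * NOTIFY_DONE for events it does not handle, so teardown is a plain
 * mlx5_notifier_unregister() with no synchronize_irq() dance. The
 * container_of() recovery step as a standalone sketch:
 */
#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct nb_stub { int priority; };	/* stand-in for notifier_block */

struct my_priv {
	int carrier_events;
	struct nb_stub events_nb;	/* embedded, like priv->events_nb */
};

static void on_event(struct nb_stub *nb)
{
	/* recover the enclosing object from the embedded member */
	struct my_priv *priv = container_of(nb, struct my_priv, events_nb);

	priv->carrier_events++;
}

int main(void)
{
	struct my_priv p = { 0 };

	on_event(&p.events_nb);
	assert(p.carrier_events == 1);
	return 0;
}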
&wqe->ctrl; - struct mlx5_wqe_eth_seg *eseg = &wqe->eth; - struct mlx5_wqe_data_seg *dseg; + if (!param->is_mpw) { + unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT; + unsigned int inline_hdr_sz = 0; + int i; - cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); - eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); + if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { + inline_hdr_sz = MLX5E_XDP_MIN_INLINE; + ds_cnt++; + } + + /* Pre initialize fixed WQE fields */ + for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) { + struct mlx5e_xdp_wqe_info *wi = &sq->db.wqe_info[i]; + struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(&sq->wq, i); + struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl; + struct mlx5_wqe_eth_seg *eseg = &wqe->eth; + struct mlx5_wqe_data_seg *dseg; + + cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt); + eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz); - dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1); - dseg->lkey = sq->mkey_be; + dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1); + dseg->lkey = sq->mkey_be; + + wi->num_wqebbs = 1; + wi->num_ds = 1; + } } return 0; @@ -1608,7 +1644,7 @@ err_free_xdpsq: return err; } -static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) +static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq) { struct mlx5e_channel *c = sq->channel; @@ -1616,7 +1652,7 @@ static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) napi_synchronize(&c->napi); mlx5e_destroy_sq(c->mdev, sq->sqn); - mlx5e_free_xdpsq_descs(sq); + mlx5e_free_xdpsq_descs(sq, rq); mlx5e_free_xdpsq(sq); } @@ -1769,11 +1805,6 @@ static void mlx5e_close_cq(struct mlx5e_cq *cq) mlx5e_free_cq(cq); } -static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix) -{ - return cpumask_first(priv->mdev->priv.irq_info[ix].mask); -} - static int mlx5e_open_tx_cqs(struct mlx5e_channel *c, struct mlx5e_params *params, struct mlx5e_channel_param *cparam) @@ -1924,9 +1955,9 @@ static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix, struct mlx5e_channel_param *cparam, struct mlx5e_channel **cp) { + int cpu = cpumask_first(mlx5_comp_irq_get_affinity_mask(priv->mdev, ix)); struct net_dim_cq_moder icocq_moder = {0, 0}; struct net_device *netdev = priv->netdev; - int cpu = mlx5e_get_cpu(priv, ix); struct mlx5e_channel *c; unsigned int irq; int err; @@ -2009,7 +2040,7 @@ err_close_rq: err_close_xdp_sq: if (c->xdp) - mlx5e_close_xdpsq(&c->rq.xdpsq); + mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq); err_close_sqs: mlx5e_close_sqs(c); @@ -2062,10 +2093,10 @@ static void mlx5e_deactivate_channel(struct mlx5e_channel *c) static void mlx5e_close_channel(struct mlx5e_channel *c) { - mlx5e_close_xdpsq(&c->xdpsq); + mlx5e_close_xdpsq(&c->xdpsq, NULL); mlx5e_close_rq(&c->rq); if (c->xdp) - mlx5e_close_xdpsq(&c->rq.xdpsq); + mlx5e_close_xdpsq(&c->rq.xdpsq, &c->rq); mlx5e_close_sqs(c); mlx5e_close_icosq(&c->icosq); napi_disable(&c->napi); @@ -2232,6 +2263,8 @@ static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv, void *cqc = param->cqc; MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index); + if (MLX5_CAP_GEN(priv->mdev, cqe_128_always) && cache_line_size() >= 128) + MLX5_SET(cqc, cqc, cqe_sz, CQE_STRIDE_128_PAD); } static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv, @@ -2308,6 +2341,7 @@ static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv, mlx5e_build_sq_param_common(priv, param); MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size); + param->is_mpw = MLX5E_GET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE); } static void mlx5e_build_channel_param(struct mlx5e_priv *priv, @@ 
-2510,7 +2544,7 @@ static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz, if (rrp.rss.hfunc == ETH_RSS_HASH_XOR) ix = mlx5e_bits_invert(i, ilog2(sz)); - ix = priv->channels.params.indirection_rqt[ix]; + ix = priv->rss_params.indirection_rqt[ix]; rqn = rrp.rss.channels->c[ix]->rq.rqn; } else { rqn = rrp.rqn; @@ -2593,7 +2627,7 @@ static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv, { .rss = { .channels = chs, - .hfunc = chs->params.rss_hfunc, + .hfunc = priv->rss_params.hfunc, } }, }; @@ -2613,6 +2647,54 @@ static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv) mlx5e_redirect_rqts(priv, drop_rrp); } +static const struct mlx5e_tirc_config tirc_default_config[MLX5E_NUM_INDIR_TIRS] = { + [MLX5E_TT_IPV4_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5E_TT_IPV6_TCP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = MLX5_L4_PROT_TYPE_TCP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5E_TT_IPV4_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5E_TT_IPV6_UDP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = MLX5_L4_PROT_TYPE_UDP, + .rx_hash_fields = MLX5_HASH_IP_L4PORTS, + }, + [MLX5E_TT_IPV4_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5E_TT_IPV6_IPSEC_AH] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5E_TT_IPV4_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5E_TT_IPV6_IPSEC_ESP] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP_IPSEC_SPI, + }, + [MLX5E_TT_IPV4] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV4, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP, + }, + [MLX5E_TT_IPV6] = { .l3_prot_type = MLX5_L3_PROT_TYPE_IPV6, + .l4_prot_type = 0, + .rx_hash_fields = MLX5_HASH_IP, + }, +}; + +struct mlx5e_tirc_config mlx5e_tirc_get_default_config(enum mlx5e_traffic_types tt) +{ + return tirc_default_config[tt]; +} + static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) { if (!params->lro_en) @@ -2628,116 +2710,68 @@ static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc) MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout); } -void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params, - enum mlx5e_traffic_types tt, +void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_rss_params *rss_params, + const struct mlx5e_tirc_config *ttconfig, void *tirc, bool inner) { void *hfso = inner ? 
MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) : MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer); -#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP) - -#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP |\ - MLX5_HASH_FIELD_SEL_L4_SPORT |\ - MLX5_HASH_FIELD_SEL_L4_DPORT) - -#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\ - MLX5_HASH_FIELD_SEL_DST_IP |\ - MLX5_HASH_FIELD_SEL_IPSEC_SPI) - - MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc)); - if (params->rss_hfunc == ETH_RSS_HASH_TOP) { + MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(rss_params->hfunc)); + if (rss_params->hfunc == ETH_RSS_HASH_TOP) { void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key); size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key); MLX5_SET(tirc, tirc, rx_hash_symmetric, 1); - memcpy(rss_key, params->toeplitz_hash_key, len); + memcpy(rss_key, rss_params->toeplitz_hash_key, len); } + MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, + ttconfig->l3_prot_type); + MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, + ttconfig->l4_prot_type); + MLX5_SET(rx_hash_field_select, hfso, selected_fields, + ttconfig->rx_hash_fields); +} - switch (tt) { - case MLX5E_TT_IPV4_TCP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_TCP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_L4PORTS); - break; - - case MLX5E_TT_IPV6_TCP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_TCP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_L4PORTS); - break; - - case MLX5E_TT_IPV4_UDP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_UDP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_L4PORTS); - break; - - case MLX5E_TT_IPV6_UDP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, l4_prot_type, - MLX5_L4_PROT_TYPE_UDP); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_L4PORTS); - break; - - case MLX5E_TT_IPV4_IPSEC_AH: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_IPSEC_SPI); - break; +static void mlx5e_update_rx_hash_fields(struct mlx5e_tirc_config *ttconfig, + enum mlx5e_traffic_types tt, + u32 rx_hash_fields) +{ + *ttconfig = tirc_default_config[tt]; + ttconfig->rx_hash_fields = rx_hash_fields; +} - case MLX5E_TT_IPV6_IPSEC_AH: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_IPSEC_SPI); - break; +void mlx5e_modify_tirs_hash(struct mlx5e_priv *priv, void *in, int inlen) +{ + void *tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx); + struct mlx5e_rss_params *rss = &priv->rss_params; + struct mlx5_core_dev *mdev = priv->mdev; + int ctxlen = MLX5_ST_SZ_BYTES(tirc); + struct mlx5e_tirc_config ttconfig; + int tt; - case MLX5E_TT_IPV4_IPSEC_ESP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_IPSEC_SPI); - break; + MLX5_SET(modify_tir_in, in, 
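/* [Editorial aside] mlx5e_update_rx_hash_fields() above is the pivot of
 * this cleanup: instead of a ten-case switch per TIR, each traffic type
 * starts from its tirc_default_config entry and only the user-tunable
 * hash-field selector is overridden. The copy-then-patch idiom in
 * isolation (field names abbreviated):
 */
struct tirc_cfg {
	unsigned int l3_prot_type;
	unsigned int l4_prot_type;
	unsigned int rx_hash_fields;
};

static struct tirc_cfg cfg_for(const struct tirc_cfg *defaults,
			       unsigned int rx_hash_fields)
{
	struct tirc_cfg c = *defaults;		/* struct copy of defaults */

	c.rx_hash_fields = rx_hash_fields;	/* patch the mutable field */
	return c;
}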
bitmask.hash, 1); - case MLX5E_TT_IPV6_IPSEC_ESP: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP_IPSEC_SPI); - break; + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + memset(tirc, 0, ctxlen); + mlx5e_update_rx_hash_fields(&ttconfig, tt, + rss->rx_hash_fields[tt]); + mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, false); + mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in, inlen); + } - case MLX5E_TT_IPV4: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV4); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP); - break; + if (!mlx5e_tunnel_inner_ft_supported(priv->mdev)) + return; - case MLX5E_TT_IPV6: - MLX5_SET(rx_hash_field_select, hfso, l3_prot_type, - MLX5_L3_PROT_TYPE_IPV6); - MLX5_SET(rx_hash_field_select, hfso, selected_fields, - MLX5_HASH_IP); - break; - default: - WARN_ONCE(true, "%s: bad traffic type!\n", __func__); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + memset(tirc, 0, ctxlen); + mlx5e_update_rx_hash_fields(&ttconfig, tt, + rss->rx_hash_fields[tt]); + mlx5e_build_indir_tir_ctx_hash(rss, &ttconfig, tirc, true); + mlx5_core_modify_tir(mdev, priv->inner_indir_tir[tt].tirn, in, + inlen); } } @@ -2794,7 +2828,8 @@ static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv, MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true); + mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, + &tirc_default_config[tt], tirc, true); } static int mlx5e_set_mtu(struct mlx5_core_dev *mdev, @@ -2825,7 +2860,7 @@ static void mlx5e_query_mtu(struct mlx5_core_dev *mdev, *mtu = MLX5E_HW2SW_MTU(params, hw_mtu); } -static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) +int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv) { struct mlx5e_params *params = &priv->channels.params; struct net_device *netdev = priv->netdev; @@ -2905,7 +2940,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv) mlx5e_activate_channels(&priv->channels); netif_tx_start_all_queues(priv->netdev); - if (MLX5_ESWITCH_MANAGER(priv->mdev)) + if (mlx5e_is_vport_rep(priv)) mlx5e_add_sqs_fwd_rules(priv); mlx5e_wait_channels_min_rx_wqes(&priv->channels); @@ -2916,7 +2951,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv) { mlx5e_redirect_rqts_to_drop(priv); - if (MLX5_ESWITCH_MANAGER(priv->mdev)) + if (mlx5e_is_vport_rep(priv)) mlx5e_remove_sqs_fwd_rules(priv); /* FIXME: This is a W/A only for tx timeout watch dog false alarm when @@ -3168,7 +3203,7 @@ err_close_tises: return err; } -void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) +static void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv) { int tc; @@ -3186,7 +3221,9 @@ static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); + + mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, + &tirc_default_config[tt], tirc, false); } static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc) @@ -3391,11 +3428,14 @@ static int mlx5e_setup_tc_cls_flower(struct mlx5e_priv *priv, { switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, cls_flower, flags); + return mlx5e_configure_flower(priv->netdev, 
priv, cls_flower, + flags); case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, cls_flower, flags); + return mlx5e_delete_flower(priv->netdev, priv, cls_flower, + flags); case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, cls_flower, flags); + return mlx5e_stats_flower(priv->netdev, priv, cls_flower, + flags); default: return -EOPNOTSUPP; } @@ -3408,7 +3448,8 @@ static int mlx5e_setup_tc_block_cb(enum tc_setup_type type, void *type_data, switch (type) { case TC_SETUP_CLSFLOWER: - return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); + return mlx5e_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | + MLX5E_TC_NIC_OFFLOAD); default: return -EOPNOTSUPP; } @@ -3451,7 +3492,7 @@ static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type, } } -static void +void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -3459,8 +3500,10 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; - /* update HW stats in background for next time */ - mlx5e_queue_update_stats(priv); + if (!mlx5e_monitor_counter_supported(priv)) { + /* update HW stats in background for next time */ + mlx5e_queue_update_stats(priv); + } if (mlx5e_is_uplink_rep(priv)) { stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); @@ -3593,7 +3636,7 @@ static int set_feature_tc_num_filters(struct net_device *netdev, bool enable) { struct mlx5e_priv *priv = netdev_priv(netdev); - if (!enable && mlx5e_tc_num_filters(priv)) { + if (!enable && mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD)) { netdev_err(netdev, "Active offloaded tc filters, can't turn hw_tc_offload off\n"); return -EINVAL; @@ -3895,7 +3938,7 @@ static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) } #ifdef CONFIG_MLX5_ESWITCH -static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) +int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; @@ -3932,8 +3975,8 @@ static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting) return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting); } -static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, - int max_tx_rate) +int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate, + int max_tx_rate) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; @@ -3974,8 +4017,8 @@ static int mlx5e_set_vf_link_state(struct net_device *dev, int vf, mlx5_ifla_link2vport(link_state)); } -static int mlx5e_get_vf_config(struct net_device *dev, - int vf, struct ifla_vf_info *ivi) +int mlx5e_get_vf_config(struct net_device *dev, + int vf, struct ifla_vf_info *ivi) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; @@ -3988,8 +4031,8 @@ static int mlx5e_get_vf_config(struct net_device *dev, return 0; } -static int mlx5e_get_vf_stats(struct net_device *dev, - int vf, struct ifla_vf_stats *vf_stats) +int mlx5e_get_vf_stats(struct net_device *dev, + int vf, struct ifla_vf_stats *vf_stats) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5_core_dev *mdev = priv->mdev; @@ -4050,8 +4093,7 @@ static void mlx5e_vxlan_queue_work(struct mlx5e_priv *priv, u16 port, int add) queue_work(priv->wq, &vxlan_work->work); } -static void 
mlx5e_add_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +void mlx5e_add_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4064,8 +4106,7 @@ static void mlx5e_add_vxlan_port(struct net_device *netdev, mlx5e_vxlan_queue_work(priv, be16_to_cpu(ti->port), 1); } -static void mlx5e_del_vxlan_port(struct net_device *netdev, - struct udp_tunnel_info *ti) +void mlx5e_del_vxlan_port(struct net_device *netdev, struct udp_tunnel_info *ti) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4115,9 +4156,9 @@ out: return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK); } -static netdev_features_t mlx5e_features_check(struct sk_buff *skb, - struct net_device *netdev, - netdev_features_t features) +netdev_features_t mlx5e_features_check(struct sk_buff *skb, + struct net_device *netdev, + netdev_features_t features) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4140,17 +4181,17 @@ static netdev_features_t mlx5e_features_check(struct sk_buff *skb, static bool mlx5e_tx_timeout_eq_recover(struct net_device *dev, struct mlx5e_txqsq *sq) { - struct mlx5_eq *eq = sq->cq.mcq.eq; + struct mlx5_eq_comp *eq = sq->cq.mcq.eq; u32 eqe_count; netdev_err(dev, "EQ 0x%x: Cons = 0x%x, irqn = 0x%x\n", - eq->eqn, eq->cons_index, eq->irqn); + eq->core.eqn, eq->core.cons_index, eq->core.irqn); eqe_count = mlx5_eq_poll_irq_disabled(eq); if (!eqe_count) return false; - netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->eqn); + netdev_err(dev, "Recover %d eqes on EQ 0x%x\n", eqe_count, eq->core.eqn); sq->channel->stats->eq_rearm++; return true; } @@ -4377,8 +4418,6 @@ const struct net_device_ops mlx5e_netdev_ops = { .ndo_get_vf_config = mlx5e_get_vf_config, .ndo_set_vf_link_state = mlx5e_set_vf_link_state, .ndo_get_vf_stats = mlx5e_get_vf_stats, - .ndo_has_offload_stats = mlx5e_has_offload_stats, - .ndo_get_offload_stats = mlx5e_get_offload_stats, #endif }; @@ -4524,15 +4563,23 @@ void mlx5e_build_rq_params(struct mlx5_core_dev *mdev, mlx5e_init_rq_type_params(mdev, params); } -void mlx5e_build_rss_params(struct mlx5e_params *params) +void mlx5e_build_rss_params(struct mlx5e_rss_params *rss_params, + u16 num_channels) { - params->rss_hfunc = ETH_RSS_HASH_XOR; - netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key)); - mlx5e_build_default_indir_rqt(params->indirection_rqt, - MLX5E_INDIR_RQT_SIZE, params->num_channels); + enum mlx5e_traffic_types tt; + + rss_params->hfunc = ETH_RSS_HASH_XOR; + netdev_rss_key_fill(rss_params->toeplitz_hash_key, + sizeof(rss_params->toeplitz_hash_key)); + mlx5e_build_default_indir_rqt(rss_params->indirection_rqt, + MLX5E_INDIR_RQT_SIZE, num_channels); + for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) + rss_params->rx_hash_fields[tt] = + tirc_default_config[tt].rx_hash_fields; } void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, + struct mlx5e_rss_params *rss_params, struct mlx5e_params *params, u16 max_channels, u16 mtu) { @@ -4548,6 +4595,10 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE : MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + /* XDP SQ */ + MLX5E_SET_PFLAG(params, MLX5E_PFLAG_XDP_TX_MPWQE, + MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe)); + /* set CQE compression */ params->rx_cqe_compress_def = false; if (MLX5_CAP_GEN(mdev, cqe_compression) && @@ -4581,7 +4632,7 @@ void mlx5e_build_nic_params(struct mlx5_core_dev *mdev, params->tx_min_inline_mode = mlx5e_params_calculate_tx_min_inline(mdev); /* RSS */ - 
mlx5e_build_rss_params(params); + mlx5e_build_rss_params(rss_params, params->num_channels); } static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) @@ -4596,12 +4647,6 @@ static void mlx5e_set_netdev_dev_addr(struct net_device *netdev) } } -#if IS_ENABLED(CONFIG_MLX5_ESWITCH) -static const struct switchdev_ops mlx5e_switchdev_ops = { - .switchdev_port_attr_get = mlx5e_attr_get, -}; -#endif - static void mlx5e_build_nic_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4711,12 +4756,6 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->priv_flags |= IFF_UNICAST_FLT; mlx5e_set_netdev_dev_addr(netdev); - -#if IS_ENABLED(CONFIG_MLX5_ESWITCH) - if (MLX5_ESWITCH_MANAGER(mdev)) - netdev->switchdev_ops = &mlx5e_switchdev_ops; -#endif - mlx5e_ipsec_build_netdev(priv); mlx5e_tls_build_netdev(priv); } @@ -4754,14 +4793,16 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, void *ppriv) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rss_params *rss = &priv->rss_params; int err; err = mlx5e_netdev_init(netdev, priv, mdev, profile, ppriv); if (err) return err; - mlx5e_build_nic_params(mdev, &priv->channels.params, - mlx5e_get_netdev_max_channels(netdev), netdev->mtu); + mlx5e_build_nic_params(mdev, rss, &priv->channels.params, + mlx5e_get_netdev_max_channels(netdev), + netdev->mtu); mlx5e_timestamp_init(priv); @@ -4891,9 +4932,8 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv) mlx5_lag_add(mdev, netdev); mlx5e_enable_async_events(priv); - - if (MLX5_ESWITCH_MANAGER(priv->mdev)) - mlx5e_register_vport_reps(priv); + if (mlx5e_monitor_counter_supported(priv)) + mlx5e_monitor_counter_init(priv); if (netdev->reg_state != NETREG_REGISTERED) return; @@ -4927,8 +4967,8 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv) queue_work(priv->wq, &priv->set_rx_mode_work); - if (MLX5_ESWITCH_MANAGER(priv->mdev)) - mlx5e_unregister_vport_reps(priv); + if (mlx5e_monitor_counter_supported(priv)) + mlx5e_monitor_counter_cleanup(priv); mlx5e_disable_async_events(priv); mlx5_lag_remove(mdev); @@ -4981,7 +5021,7 @@ int mlx5e_netdev_init(struct net_device *netdev, netif_carrier_off(netdev); #ifdef CONFIG_MLX5_EN_ARFS - netdev->rx_cpu_rmap = mdev->rmap; + netdev->rx_cpu_rmap = mlx5_eq_table_get_rmap(mdev); #endif return 0; @@ -5036,7 +5076,7 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) if (priv->channels.params.num_channels > max_nch) { mlx5_core_warn(priv->mdev, "MLX5E: Reducing number of channels to %d\n", max_nch); priv->channels.params.num_channels = max_nch; - mlx5e_build_default_indir_rqt(priv->channels.params.indirection_rqt, + mlx5e_build_default_indir_rqt(priv->rss_params.indirection_rqt, MLX5E_INDIR_RQT_SIZE, max_nch); } @@ -5125,7 +5165,6 @@ static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv) static void *mlx5e_add(struct mlx5_core_dev *mdev) { struct net_device *netdev; - void *rpriv = NULL; void *priv; int err; int nch; @@ -5135,20 +5174,18 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev) return NULL; #ifdef CONFIG_MLX5_ESWITCH - if (MLX5_ESWITCH_MANAGER(mdev)) { - rpriv = mlx5e_alloc_nic_rep_priv(mdev); - if (!rpriv) { - mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n"); - return NULL; - } + if (MLX5_ESWITCH_MANAGER(mdev) && + mlx5_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) { + mlx5e_rep_register_vport_reps(mdev); + return mdev; } #endif nch = mlx5e_get_max_num_channels(mdev); - netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, rpriv); + netdev = 
mlx5e_create_netdev(mdev, &mlx5e_nic_profile, nch, NULL); if (!netdev) { mlx5_core_err(mdev, "mlx5e_create_netdev failed\n"); - goto err_free_rpriv; + return NULL; } priv = netdev_priv(netdev); @@ -5174,30 +5211,26 @@ err_detach: mlx5e_detach(mdev, priv); err_destroy_netdev: mlx5e_destroy_netdev(priv); -err_free_rpriv: - kfree(rpriv); return NULL; } static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv) { - struct mlx5e_priv *priv = vpriv; - void *ppriv = priv->ppriv; + struct mlx5e_priv *priv; +#ifdef CONFIG_MLX5_ESWITCH + if (MLX5_ESWITCH_MANAGER(mdev) && vpriv == mdev) { + mlx5e_rep_unregister_vport_reps(mdev); + return; + } +#endif + priv = vpriv; #ifdef CONFIG_MLX5_CORE_EN_DCB mlx5e_dcbnl_delete_app(priv); #endif unregister_netdev(priv->netdev); mlx5e_detach(mdev, vpriv); mlx5e_destroy_netdev(priv); - kfree(ppriv); -} - -static void *mlx5e_get_netdev(void *vpriv) -{ - struct mlx5e_priv *priv = vpriv; - - return priv->netdev; } static struct mlx5_interface mlx5e_interface = { @@ -5205,9 +5238,7 @@ static struct mlx5_interface mlx5e_interface = { .remove = mlx5e_remove, .attach = mlx5e_attach, .detach = mlx5e_detach, - .event = mlx5e_async_event, .protocol = MLX5_INTERFACE_PROTOCOL_ETH, - .get_dev = mlx5e_get_netdev, }; void mlx5e_init(void) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 820fe85100b0..96cc0c6a4014 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -42,14 +42,24 @@ #include "en.h" #include "en_rep.h" #include "en_tc.h" +#include "en/tc_tun.h" #include "fs_core.h" -#define MLX5E_REP_PARAMS_LOG_SQ_SIZE \ - max(0x6, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) +#define MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE \ + max(0x7, MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE) #define MLX5E_REP_PARAMS_DEF_NUM_CHANNELS 1 static const char mlx5e_rep_driver_name[] = "mlx5e_rep"; +struct mlx5e_rep_indr_block_priv { + struct net_device *netdev; + struct mlx5e_rep_priv *rpriv; + + struct list_head list; +}; + +static void mlx5e_rep_indr_unregister_block(struct net_device *netdev); + static void mlx5e_rep_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { @@ -99,7 +109,7 @@ static void mlx5e_rep_get_strings(struct net_device *dev, } } -static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) +static void mlx5e_vf_rep_update_hw_counters(struct mlx5e_priv *priv) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -122,6 +132,32 @@ static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) vport_stats->tx_bytes = vf_stats.rx_bytes; } +static void mlx5e_uplink_rep_update_hw_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_pport_stats *pstats = &priv->stats.pport; + struct rtnl_link_stats64 *vport_stats; + + mlx5e_grp_802_3_update_stats(priv); + + vport_stats = &priv->stats.vf_vport; + + vport_stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok); + vport_stats->rx_bytes = PPORT_802_3_GET(pstats, a_octets_received_ok); + vport_stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok); + vport_stats->tx_bytes = PPORT_802_3_GET(pstats, a_octets_transmitted_ok); +} + +static void mlx5e_rep_update_hw_counters(struct mlx5e_priv *priv) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + + if (rep->vport == FDB_UPLINK_VPORT) + mlx5e_uplink_rep_update_hw_counters(priv); + else + mlx5e_vf_rep_update_hw_counters(priv); +} + static 
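
/*
 * A stand-alone sketch of the cookie convention used by mlx5e_add() and
 * mlx5e_remove() above: in switchdev-offloads mode, add() returns the mdev
 * pointer itself instead of a netdev priv, and remove() recognizes that
 * sentinel via "vpriv == mdev".  All names below are illustrative
 * stand-ins, not driver code.
 */
#include <stdio.h>
#include <stdlib.h>

struct core_dev { int offloads_mode; };
struct nic_priv { struct core_dev *dev; };

static void *example_add(struct core_dev *dev)
{
	struct nic_priv *priv;

	if (dev->offloads_mode)
		return dev;			/* cookie == dev => rep mode */

	priv = malloc(sizeof(*priv));
	if (!priv)
		return NULL;
	priv->dev = dev;
	return priv;				/* cookie == priv => NIC mode */
}

static void example_remove(struct core_dev *dev, void *cookie)
{
	if (cookie == dev) {			/* rep mode: no priv to free */
		puts("rep mode teardown");
		return;
	}
	free(cookie);				/* NIC mode: free the priv */
	puts("nic mode teardown");
}

int main(void)
{
	struct core_dev d = { .offloads_mode = 1 };

	example_remove(&d, example_add(&d));
	d.offloads_mode = 0;
	example_remove(&d, example_add(&d));
	return 0;
}
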
void mlx5e_rep_update_sw_counters(struct mlx5e_priv *priv) { struct mlx5e_sw_stats *s = &priv->stats.sw; @@ -257,6 +293,22 @@ static int mlx5e_rep_set_channels(struct net_device *dev, return 0; } +static int mlx5e_rep_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_coalesce(priv, coal); +} + +static int mlx5e_rep_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *coal) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_set_coalesce(priv, coal); +} + static u32 mlx5e_rep_get_rxfh_key_size(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -271,7 +323,55 @@ static u32 mlx5e_rep_get_rxfh_indir_size(struct net_device *netdev) return mlx5e_ethtool_get_rxfh_indir_size(priv); } -static const struct ethtool_ops mlx5e_rep_ethtool_ops = { +static void mlx5e_uplink_rep_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + mlx5e_ethtool_get_pauseparam(priv, pauseparam); +} + +static int mlx5e_uplink_rep_set_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pauseparam) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_set_pauseparam(priv, pauseparam); +} + +static int mlx5e_uplink_rep_get_link_ksettings(struct net_device *netdev, + struct ethtool_link_ksettings *link_ksettings) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings); +} + +static int mlx5e_uplink_rep_set_link_ksettings(struct net_device *netdev, + const struct ethtool_link_ksettings *link_ksettings) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + + return mlx5e_ethtool_set_link_ksettings(priv, link_ksettings); +} + +static const struct ethtool_ops mlx5e_vf_rep_ethtool_ops = { + .get_drvinfo = mlx5e_rep_get_drvinfo, + .get_link = ethtool_op_get_link, + .get_strings = mlx5e_rep_get_strings, + .get_sset_count = mlx5e_rep_get_sset_count, + .get_ethtool_stats = mlx5e_rep_get_ethtool_stats, + .get_ringparam = mlx5e_rep_get_ringparam, + .set_ringparam = mlx5e_rep_set_ringparam, + .get_channels = mlx5e_rep_get_channels, + .set_channels = mlx5e_rep_set_channels, + .get_coalesce = mlx5e_rep_get_coalesce, + .set_coalesce = mlx5e_rep_set_coalesce, + .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, + .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, +}; + +static const struct ethtool_ops mlx5e_uplink_rep_ethtool_ops = { .get_drvinfo = mlx5e_rep_get_drvinfo, .get_link = ethtool_op_get_link, .get_strings = mlx5e_rep_get_strings, @@ -281,24 +381,44 @@ static const struct ethtool_ops mlx5e_rep_ethtool_ops = { .set_ringparam = mlx5e_rep_set_ringparam, .get_channels = mlx5e_rep_get_channels, .set_channels = mlx5e_rep_set_channels, + .get_coalesce = mlx5e_rep_get_coalesce, + .set_coalesce = mlx5e_rep_set_coalesce, + .get_link_ksettings = mlx5e_uplink_rep_get_link_ksettings, + .set_link_ksettings = mlx5e_uplink_rep_set_link_ksettings, .get_rxfh_key_size = mlx5e_rep_get_rxfh_key_size, .get_rxfh_indir_size = mlx5e_rep_get_rxfh_indir_size, + .get_pauseparam = mlx5e_uplink_rep_get_pauseparam, + .set_pauseparam = mlx5e_uplink_rep_set_pauseparam, }; -int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) +static int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_rep_priv *rpriv = priv->ppriv; - 
struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *uplink_upper = NULL; + struct mlx5e_priv *uplink_priv = NULL; + struct net_device *uplink_dev; if (esw->mode == SRIOV_NONE) return -EOPNOTSUPP; + uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + if (uplink_dev) { + uplink_upper = netdev_master_upper_dev_get(uplink_dev); + uplink_priv = netdev_priv(uplink_dev); + } + switch (attr->id) { case SWITCHDEV_ATTR_ID_PORT_PARENT_ID: attr->u.ppid.id_len = ETH_ALEN; - ether_addr_copy(attr->u.ppid.id, rep->hw_id); + if (uplink_upper && mlx5_lag_is_sriov(uplink_priv->mdev)) { + ether_addr_copy(attr->u.ppid.id, uplink_upper->dev_addr); + } else { + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + + ether_addr_copy(attr->u.ppid.id, rep->hw_id); + } break; default: return -EOPNOTSUPP; @@ -519,6 +639,184 @@ static void mlx5e_rep_neigh_update(struct work_struct *work) neigh_release(n); } +static struct mlx5e_rep_indr_block_priv * +mlx5e_rep_indr_block_priv_lookup(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev) +{ + struct mlx5e_rep_indr_block_priv *cb_priv; + + /* All callback list access should be protected by RTNL. */ + ASSERT_RTNL(); + + list_for_each_entry(cb_priv, + &rpriv->uplink_priv.tc_indr_block_priv_list, + list) + if (cb_priv->netdev == netdev) + return cb_priv; + + return NULL; +} + +static void mlx5e_rep_indr_clean_block_privs(struct mlx5e_rep_priv *rpriv) +{ + struct mlx5e_rep_indr_block_priv *cb_priv, *temp; + struct list_head *head = &rpriv->uplink_priv.tc_indr_block_priv_list; + + list_for_each_entry_safe(cb_priv, temp, head, list) { + mlx5e_rep_indr_unregister_block(cb_priv->netdev); + kfree(cb_priv); + } +} + +static int +mlx5e_rep_indr_offload(struct net_device *netdev, + struct tc_cls_flower_offload *flower, + struct mlx5e_rep_indr_block_priv *indr_priv) +{ + struct mlx5e_priv *priv = netdev_priv(indr_priv->rpriv->netdev); + int flags = MLX5E_TC_EGRESS | MLX5E_TC_ESW_OFFLOAD; + int err = 0; + + switch (flower->command) { + case TC_CLSFLOWER_REPLACE: + err = mlx5e_configure_flower(netdev, priv, flower, flags); + break; + case TC_CLSFLOWER_DESTROY: + err = mlx5e_delete_flower(netdev, priv, flower, flags); + break; + case TC_CLSFLOWER_STATS: + err = mlx5e_stats_flower(netdev, priv, flower, flags); + break; + default: + err = -EOPNOTSUPP; + } + + return err; +} + +static int mlx5e_rep_indr_setup_block_cb(enum tc_setup_type type, + void *type_data, void *indr_priv) +{ + struct mlx5e_rep_indr_block_priv *priv = indr_priv; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return mlx5e_rep_indr_offload(priv->netdev, type_data, priv); + default: + return -EOPNOTSUPP; + } +} + +static int +mlx5e_rep_indr_setup_tc_block(struct net_device *netdev, + struct mlx5e_rep_priv *rpriv, + struct tc_block_offload *f) +{ + struct mlx5e_rep_indr_block_priv *indr_priv; + int err = 0; + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + if (indr_priv) + return -EEXIST; + + indr_priv = kmalloc(sizeof(*indr_priv), GFP_KERNEL); + if (!indr_priv) + return -ENOMEM; + + indr_priv->netdev = netdev; + indr_priv->rpriv = rpriv; + list_add(&indr_priv->list, + &rpriv->uplink_priv.tc_indr_block_priv_list); + + err = tcf_block_cb_register(f->block, + mlx5e_rep_indr_setup_block_cb, + netdev, indr_priv, f->extack); + if (err) { + 
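
/*
 * mlx5e_rep_indr_block_priv_lookup() above is a plain linear walk over a
 * per-uplink list keyed by the lower netdev, relying on RTNL for exclusion.
 * A self-contained user-space sketch of the same lookup; the list helpers
 * and container_of below are minimal stand-ins for <linux/list.h>, not the
 * kernel's definitions.
 */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void list_init(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
	n->next = h->next;
	n->prev = h;
	h->next->prev = n;
	h->next = n;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct indr_block_priv {
	void *netdev;			/* key: the registered lower device */
	struct list_head list;
};

static struct indr_block_priv *
indr_block_priv_lookup(struct list_head *head, void *netdev)
{
	struct list_head *pos;

	for (pos = head->next; pos != head; pos = pos->next) {
		struct indr_block_priv *p =
			container_of(pos, struct indr_block_priv, list);

		if (p->netdev == netdev)	/* match on the device pointer */
			return p;
	}
	return NULL;
}

int main(void)
{
	struct list_head head;
	struct indr_block_priv a = { .netdev = (void *)0x1 };

	list_init(&head);
	list_add(&a.list, &head);
	printf("%s\n", indr_block_priv_lookup(&head, (void *)0x1) ? "hit" : "miss");
	return 0;
}
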
list_del(&indr_priv->list); + kfree(indr_priv); + } + + return err; + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, + mlx5e_rep_indr_setup_block_cb, + netdev); + indr_priv = mlx5e_rep_indr_block_priv_lookup(rpriv, netdev); + if (indr_priv) { + list_del(&indr_priv->list); + kfree(indr_priv); + } + + return 0; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static +int mlx5e_rep_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return mlx5e_rep_indr_setup_tc_block(netdev, cb_priv, + type_data); + default: + return -EOPNOTSUPP; + } +} + +static int mlx5e_rep_indr_register_block(struct mlx5e_rep_priv *rpriv, + struct net_device *netdev) +{ + int err; + + err = __tc_indr_block_cb_register(netdev, rpriv, + mlx5e_rep_indr_setup_tc_cb, + netdev); + if (err) { + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + + mlx5_core_err(priv->mdev, "Failed to register remote block notifier for %s err=%d\n", + netdev_name(netdev), err); + } + return err; +} + +static void mlx5e_rep_indr_unregister_block(struct net_device *netdev) +{ + __tc_indr_block_cb_unregister(netdev, mlx5e_rep_indr_setup_tc_cb, + netdev); +} + +static int mlx5e_nic_rep_netdevice_event(struct notifier_block *nb, + unsigned long event, void *ptr) +{ + struct mlx5e_rep_priv *rpriv = container_of(nb, struct mlx5e_rep_priv, + uplink_priv.netdevice_nb); + struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + + if (!mlx5e_tc_tun_device_to_offload(priv, netdev)) + return NOTIFY_OK; + + switch (event) { + case NETDEV_REGISTER: + mlx5e_rep_indr_register_block(rpriv, netdev); + break; + case NETDEV_UNREGISTER: + mlx5e_rep_indr_unregister_block(netdev); + break; + } + return NOTIFY_OK; +} + static struct mlx5e_neigh_hash_entry * mlx5e_rep_neigh_entry_lookup(struct mlx5e_priv *priv, struct mlx5e_neigh *m_neigh); @@ -780,7 +1078,7 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, mlx5e_rep_neigh_entry_destroy(priv, nhe); } -static int mlx5e_rep_open(struct net_device *dev) +static int mlx5e_vf_rep_open(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -802,7 +1100,7 @@ unlock: return err; } -static int mlx5e_rep_close(struct net_device *dev) +static int mlx5e_vf_rep_close(struct net_device *dev) { struct mlx5e_priv *priv = netdev_priv(dev); struct mlx5e_rep_priv *rpriv = priv->ppriv; @@ -839,24 +1137,14 @@ mlx5e_rep_setup_tc_cls_flower(struct mlx5e_priv *priv, { switch (cls_flower->command) { case TC_CLSFLOWER_REPLACE: - return mlx5e_configure_flower(priv, cls_flower, flags); + return mlx5e_configure_flower(priv->netdev, priv, cls_flower, + flags); case TC_CLSFLOWER_DESTROY: - return mlx5e_delete_flower(priv, cls_flower, flags); + return mlx5e_delete_flower(priv->netdev, priv, cls_flower, + flags); case TC_CLSFLOWER_STATS: - return mlx5e_stats_flower(priv, cls_flower, flags); - default: - return -EOPNOTSUPP; - } -} - -static int mlx5e_rep_setup_tc_cb_egdev(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct mlx5e_priv *priv = cb_priv; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_EGRESS); + return mlx5e_stats_flower(priv->netdev, priv, cls_flower, + flags); default: return -EOPNOTSUPP; } @@ -869,7 +1157,8 @@ static int mlx5e_rep_setup_tc_cb(enum tc_setup_type type, void *type_data, switch (type) { case 
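
/*
 * The netdevice notifier above is what makes indirect offload work: when a
 * tunnel device the hardware can decapsulate for is registered, the uplink
 * rep attaches an indirect TC block to it, and detaches it again on
 * unregister.  A stand-alone mimic of that dispatch; the NETDEV_* values,
 * struct net_device and the attach/detach helpers here are stand-ins, not
 * the kernel's.
 */
#include <stdio.h>

enum { NETDEV_REGISTER = 1, NETDEV_UNREGISTER = 2 };

struct net_device { const char *name; int is_offloadable_tunnel; };

static void attach_block(struct net_device *d) { printf("attach %s\n", d->name); }
static void detach_block(struct net_device *d) { printf("detach %s\n", d->name); }

static int netdevice_event(unsigned long event, struct net_device *dev)
{
	if (!dev->is_offloadable_tunnel)	/* ignore devices we can't offload */
		return 0;

	switch (event) {
	case NETDEV_REGISTER:
		attach_block(dev);
		break;
	case NETDEV_UNREGISTER:
		detach_block(dev);
		break;
	}
	return 0;
}

int main(void)
{
	struct net_device vxlan0 = { "vxlan0", 1 };
	struct net_device eth0 = { "eth0", 0 };

	netdevice_event(NETDEV_REGISTER, &vxlan0);
	netdevice_event(NETDEV_REGISTER, &eth0);	/* filtered out */
	netdevice_event(NETDEV_UNREGISTER, &vxlan0);
	return 0;
}
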
TC_SETUP_CLSFLOWER: - return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS); + return mlx5e_rep_setup_tc_cls_flower(priv, type_data, MLX5E_TC_INGRESS | + MLX5E_TC_ESW_OFFLOAD); default: return -EOPNOTSUPP; } @@ -908,43 +1197,23 @@ static int mlx5e_rep_setup_tc(struct net_device *dev, enum tc_setup_type type, bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *rpriv = priv->ppriv; struct mlx5_eswitch_rep *rep; if (!MLX5_ESWITCH_MANAGER(priv->mdev)) return false; - rep = rpriv->rep; - if (esw->mode == SRIOV_OFFLOADS && - rep && rep->vport == FDB_UPLINK_VPORT) - return true; - - return false; -} - -static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv) -{ - struct mlx5e_rep_priv *rpriv = priv->ppriv; - struct mlx5_eswitch_rep *rep; - - if (!MLX5_ESWITCH_MANAGER(priv->mdev)) + if (!rpriv) /* non vport rep mlx5e instances don't use this field */ return false; rep = rpriv->rep; - if (rep && rep->vport != FDB_UPLINK_VPORT) - return true; - - return false; + return (rep->vport == FDB_UPLINK_VPORT); } -bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id) +static bool mlx5e_rep_has_offload_stats(const struct net_device *dev, int attr_id) { - struct mlx5e_priv *priv = netdev_priv(dev); - switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: - if (mlx5e_is_vf_vport_rep(priv) || mlx5e_is_uplink_rep(priv)) return true; } @@ -970,8 +1239,8 @@ mlx5e_get_sw_stats64(const struct net_device *dev, return 0; } -int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, - void *sp) +static int mlx5e_rep_get_offload_stats(int attr_id, const struct net_device *dev, + void *sp) { switch (attr_id) { case IFLA_OFFLOAD_XSTATS_CPU_HIT: @@ -982,7 +1251,7 @@ int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, } static void -mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) +mlx5e_vf_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); @@ -991,37 +1260,93 @@ mlx5e_rep_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) memcpy(stats, &priv->stats.vf_vport, sizeof(*stats)); } +static int mlx5e_vf_rep_change_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, NULL); +} + +static int mlx5e_uplink_rep_change_mtu(struct net_device *netdev, int new_mtu) +{ + return mlx5e_change_mtu(netdev, new_mtu, mlx5e_set_dev_port_mtu); +} + +static int mlx5e_uplink_rep_set_mac(struct net_device *netdev, void *addr) +{ + struct sockaddr *saddr = addr; + + if (!is_valid_ether_addr(saddr->sa_data)) + return -EADDRNOTAVAIL; + + ether_addr_copy(netdev->dev_addr, saddr->sa_data); + return 0; +} + static const struct switchdev_ops mlx5e_rep_switchdev_ops = { .switchdev_port_attr_get = mlx5e_attr_get, }; -static int mlx5e_change_rep_mtu(struct net_device *netdev, int new_mtu) -{ - return mlx5e_change_mtu(netdev, new_mtu, NULL); -} +static const struct net_device_ops mlx5e_netdev_ops_vf_rep = { + .ndo_open = mlx5e_vf_rep_open, + .ndo_stop = mlx5e_vf_rep_close, + .ndo_start_xmit = mlx5e_xmit, + .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, + .ndo_setup_tc = mlx5e_rep_setup_tc, + .ndo_get_stats64 = mlx5e_vf_rep_get_stats, + .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, + .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, + .ndo_change_mtu = mlx5e_vf_rep_change_mtu, +}; -static const struct net_device_ops mlx5e_netdev_ops_rep = { - .ndo_open = 
mlx5e_rep_open, - .ndo_stop = mlx5e_rep_close, +static const struct net_device_ops mlx5e_netdev_ops_uplink_rep = { + .ndo_open = mlx5e_open, + .ndo_stop = mlx5e_close, .ndo_start_xmit = mlx5e_xmit, + .ndo_set_mac_address = mlx5e_uplink_rep_set_mac, .ndo_get_phys_port_name = mlx5e_rep_get_phys_port_name, .ndo_setup_tc = mlx5e_rep_setup_tc, - .ndo_get_stats64 = mlx5e_rep_get_stats, - .ndo_has_offload_stats = mlx5e_has_offload_stats, - .ndo_get_offload_stats = mlx5e_get_offload_stats, - .ndo_change_mtu = mlx5e_change_rep_mtu, + .ndo_get_stats64 = mlx5e_get_stats, + .ndo_has_offload_stats = mlx5e_rep_has_offload_stats, + .ndo_get_offload_stats = mlx5e_rep_get_offload_stats, + .ndo_change_mtu = mlx5e_uplink_rep_change_mtu, + .ndo_udp_tunnel_add = mlx5e_add_vxlan_port, + .ndo_udp_tunnel_del = mlx5e_del_vxlan_port, + .ndo_features_check = mlx5e_features_check, + .ndo_set_vf_mac = mlx5e_set_vf_mac, + .ndo_set_vf_rate = mlx5e_set_vf_rate, + .ndo_get_vf_config = mlx5e_get_vf_config, + .ndo_get_vf_stats = mlx5e_get_vf_stats, }; -static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, - struct mlx5e_params *params, u16 mtu) +bool mlx5e_eswitch_rep(struct net_device *netdev) +{ + if (netdev->netdev_ops == &mlx5e_netdev_ops_vf_rep || + netdev->netdev_ops == &mlx5e_netdev_ops_uplink_rep) + return true; + + return false; +} + +static void mlx5e_build_rep_params(struct net_device *netdev) { + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; + struct mlx5_core_dev *mdev = priv->mdev; + struct mlx5e_params *params; + u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ? MLX5_CQ_PERIOD_MODE_START_FROM_CQE : MLX5_CQ_PERIOD_MODE_START_FROM_EQE; + params = &priv->channels.params; params->hard_mtu = MLX5E_ETH_HARD_MTU; - params->sw_mtu = mtu; - params->log_sq_size = MLX5E_REP_PARAMS_LOG_SQ_SIZE; + params->sw_mtu = netdev->mtu; + + /* SQ */ + if (rep->vport == FDB_UPLINK_VPORT) + params->log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE; + else + params->log_sq_size = MLX5E_REP_PARAMS_DEF_LOG_SQ_SIZE; /* RQ */ mlx5e_build_rq_params(mdev, params); @@ -1035,24 +1360,38 @@ static void mlx5e_build_rep_params(struct mlx5_core_dev *mdev, mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); /* RSS */ - mlx5e_build_rss_params(params); + mlx5e_build_rss_params(&priv->rss_params, params->num_channels); } static void mlx5e_build_rep_netdev(struct net_device *netdev) { struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *rep = rpriv->rep; struct mlx5_core_dev *mdev = priv->mdev; - u16 max_mtu; - netdev->netdev_ops = &mlx5e_netdev_ops_rep; + if (rep->vport == FDB_UPLINK_VPORT) { + SET_NETDEV_DEV(netdev, &priv->mdev->pdev->dev); + netdev->netdev_ops = &mlx5e_netdev_ops_uplink_rep; + /* we want a persistent mac for the uplink rep */ + mlx5_query_nic_vport_mac_address(mdev, 0, netdev->dev_addr); + netdev->ethtool_ops = &mlx5e_uplink_rep_ethtool_ops; +#ifdef CONFIG_MLX5_CORE_EN_DCB + if (MLX5_CAP_GEN(mdev, qos)) + netdev->dcbnl_ops = &mlx5e_dcbnl_ops; +#endif + } else { + netdev->netdev_ops = &mlx5e_netdev_ops_vf_rep; + eth_hw_addr_random(netdev); + netdev->ethtool_ops = &mlx5e_vf_rep_ethtool_ops; + } netdev->watchdog_timeo = 15 * HZ; - netdev->ethtool_ops = &mlx5e_rep_ethtool_ops; netdev->switchdev_ops = &mlx5e_rep_switchdev_ops; - netdev->features |= NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_TC | NETIF_F_NETNS_LOCAL; + netdev->features |= NETIF_F_HW_TC | 
NETIF_F_NETNS_LOCAL; netdev->hw_features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_SG; @@ -1063,13 +1402,10 @@ static void mlx5e_build_rep_netdev(struct net_device *netdev) netdev->hw_features |= NETIF_F_TSO6; netdev->hw_features |= NETIF_F_RXCSUM; - netdev->features |= netdev->hw_features; - - eth_hw_addr_random(netdev); + if (rep->vport != FDB_UPLINK_VPORT) + netdev->features |= NETIF_F_VLAN_CHALLENGED; - netdev->min_mtu = ETH_MIN_MTU; - mlx5_query_port_max_mtu(mdev, &max_mtu, 1); - netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); + netdev->features |= netdev->hw_features; } static int mlx5e_init_rep(struct mlx5_core_dev *mdev, @@ -1086,7 +1422,7 @@ static int mlx5e_init_rep(struct mlx5_core_dev *mdev, priv->channels.params.num_channels = MLX5E_REP_PARAMS_DEF_NUM_CHANNELS; - mlx5e_build_rep_params(mdev, &priv->channels.params, netdev->mtu); + mlx5e_build_rep_params(netdev); mlx5e_build_rep_netdev(netdev); mlx5e_timestamp_init(priv); @@ -1209,94 +1545,173 @@ static void mlx5e_cleanup_rep_rx(struct mlx5e_priv *priv) static int mlx5e_init_rep_tx(struct mlx5e_priv *priv) { - int err; + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_rep_uplink_priv *uplink_priv; + int tc, err; err = mlx5e_create_tises(priv); if (err) { mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err); return err; } - return 0; -} -static const struct mlx5e_profile mlx5e_rep_profile = { - .init = mlx5e_init_rep, - .cleanup = mlx5e_cleanup_rep, - .init_rx = mlx5e_init_rep_rx, - .cleanup_rx = mlx5e_cleanup_rep_rx, - .init_tx = mlx5e_init_rep_tx, - .cleanup_tx = mlx5e_cleanup_nic_tx, - .update_stats = mlx5e_rep_update_hw_counters, - .update_carrier = NULL, - .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, - .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, - .max_tc = 1, -}; + if (rpriv->rep->vport == FDB_UPLINK_VPORT) { + uplink_priv = &rpriv->uplink_priv; -/* e-Switch vport representors */ + /* init shared tc flow table */ + err = mlx5e_tc_esw_init(&uplink_priv->tc_ht); + if (err) + goto destroy_tises; + + /* init indirect block notifications */ + INIT_LIST_HEAD(&uplink_priv->tc_indr_block_priv_list); + uplink_priv->netdevice_nb.notifier_call = mlx5e_nic_rep_netdevice_event; + err = register_netdevice_notifier(&uplink_priv->netdevice_nb); + if (err) { + mlx5_core_err(priv->mdev, "Failed to register netdev notifier\n"); + goto tc_esw_cleanup; + } + } -static int -mlx5e_nic_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) + return 0; + +tc_esw_cleanup: + mlx5e_tc_esw_cleanup(&uplink_priv->tc_ht); +destroy_tises: + for (tc = 0; tc < priv->profile->max_tc; tc++) + mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); + return err; +} + +static void mlx5e_cleanup_rep_tx(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); - struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct mlx5e_rep_priv *rpriv = priv->ppriv; + int tc; - int err; + for (tc = 0; tc < priv->profile->max_tc; tc++) + mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]); - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) { - err = mlx5e_add_sqs_fwd_rules(priv); - if (err) - return err; + if (rpriv->rep->vport == FDB_UPLINK_VPORT) { + /* clean indirect TC block notifications */ + unregister_netdevice_notifier(&rpriv->uplink_priv.netdevice_nb); + mlx5e_rep_indr_clean_block_privs(rpriv); + + /* delete shared tc flow table */ + mlx5e_tc_esw_cleanup(&rpriv->uplink_priv.tc_ht); } +} - err = mlx5e_rep_neigh_init(rpriv); - if (err) - goto err_remove_sqs; +static 
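
/*
 * mlx5e_init_rep_tx() above uses the kernel's goto-unwind idiom: each
 * failure label undoes exactly the steps that already succeeded, in
 * reverse order, so the uplink-only steps (shared TC table, netdev
 * notifier) unwind without disturbing the TIS teardown.  A minimal
 * stand-alone illustration with stand-in steps:
 */
#include <stdio.h>

static int step_a(void) { return 0; }
static int step_b(void) { return -1; }	/* force a failure */
static void undo_a(void) { puts("undo a"); }

static int example_init(void)
{
	int err;

	err = step_a();
	if (err)
		return err;		/* nothing to unwind yet */

	err = step_b();
	if (err)
		goto undo_step_a;	/* unwind only what succeeded */

	return 0;

undo_step_a:
	undo_a();
	return err;
}

int main(void)
{
	return example_init() ? 1 : 0;
}
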
void mlx5e_vf_rep_enable(struct mlx5e_priv *priv) +{ + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + u16 max_mtu; - /* init shared tc flow table */ - err = mlx5e_tc_esw_init(&rpriv->tc_ht); - if (err) - goto err_neigh_cleanup; + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); +} - return 0; +static int uplink_rep_async_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5e_priv *priv = container_of(nb, struct mlx5e_priv, events_nb); + struct mlx5_eqe *eqe = data; -err_neigh_cleanup: - mlx5e_rep_neigh_cleanup(rpriv); -err_remove_sqs: - mlx5e_remove_sqs_fwd_rules(priv); - return err; + if (event != MLX5_EVENT_TYPE_PORT_CHANGE) + return NOTIFY_DONE; + + switch (eqe->sub_type) { + case MLX5_PORT_CHANGE_SUBTYPE_DOWN: + case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: + queue_work(priv->wq, &priv->update_carrier_work); + break; + default: + return NOTIFY_DONE; + } + + return NOTIFY_OK; } -static void -mlx5e_nic_rep_unload(struct mlx5_eswitch_rep *rep) +static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv) { - struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); - struct mlx5e_priv *priv = netdev_priv(rpriv->netdev); + struct net_device *netdev = priv->netdev; + struct mlx5_core_dev *mdev = priv->mdev; + u16 max_mtu; - if (test_bit(MLX5E_STATE_OPENED, &priv->state)) - mlx5e_remove_sqs_fwd_rules(priv); + netdev->min_mtu = ETH_MIN_MTU; + mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1); + netdev->max_mtu = MLX5E_HW2SW_MTU(&priv->channels.params, max_mtu); + mlx5e_set_dev_port_mtu(priv); + + mlx5_lag_add(mdev, netdev); + priv->events_nb.notifier_call = uplink_rep_async_event; + mlx5_notifier_register(mdev, &priv->events_nb); +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_initialize(priv); + mlx5e_dcbnl_init_app(priv); +#endif +} - /* clean uplink offloaded TC rules, delete shared tc flow table */ - mlx5e_tc_esw_cleanup(&rpriv->tc_ht); +static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv) +{ + struct mlx5_core_dev *mdev = priv->mdev; - mlx5e_rep_neigh_cleanup(rpriv); +#ifdef CONFIG_MLX5_CORE_EN_DCB + mlx5e_dcbnl_delete_app(priv); +#endif + mlx5_notifier_unregister(mdev, &priv->events_nb); + mlx5_lag_remove(mdev); } +static const struct mlx5e_profile mlx5e_vf_rep_profile = { + .init = mlx5e_init_rep, + .cleanup = mlx5e_cleanup_rep, + .init_rx = mlx5e_init_rep_rx, + .cleanup_rx = mlx5e_cleanup_rep_rx, + .init_tx = mlx5e_init_rep_tx, + .cleanup_tx = mlx5e_cleanup_rep_tx, + .enable = mlx5e_vf_rep_enable, + .update_stats = mlx5e_vf_rep_update_hw_counters, + .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, + .max_tc = 1, +}; + +static const struct mlx5e_profile mlx5e_uplink_rep_profile = { + .init = mlx5e_init_rep, + .cleanup = mlx5e_cleanup_rep, + .init_rx = mlx5e_init_rep_rx, + .cleanup_rx = mlx5e_cleanup_rep_rx, + .init_tx = mlx5e_init_rep_tx, + .cleanup_tx = mlx5e_cleanup_rep_tx, + .enable = mlx5e_uplink_rep_enable, + .disable = mlx5e_uplink_rep_disable, + .update_stats = mlx5e_uplink_rep_update_hw_counters, + .update_carrier = mlx5e_update_carrier, + .rx_handlers.handle_rx_cqe = mlx5e_handle_rx_cqe_rep, + .rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq, + .max_tc = MLX5E_MAX_NUM_TC, +}; + +/* e-Switch vport representors */ static int mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) { - struct mlx5e_rep_priv 
*uplink_rpriv; + const struct mlx5e_profile *profile; struct mlx5e_rep_priv *rpriv; struct net_device *netdev; - struct mlx5e_priv *upriv; int nch, err; rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); if (!rpriv) return -ENOMEM; + /* rpriv->rep to be looked up when profile->init() is called */ + rpriv->rep = rep; + nch = mlx5e_get_max_num_channels(dev); - netdev = mlx5e_create_netdev(dev, &mlx5e_rep_profile, nch, rpriv); + profile = (rep->vport == FDB_UPLINK_VPORT) ? &mlx5e_uplink_rep_profile : &mlx5e_vf_rep_profile; + netdev = mlx5e_create_netdev(dev, profile, nch, rpriv); if (!netdev) { pr_warn("Failed to create representor netdev for vport %d\n", rep->vport); @@ -1305,15 +1720,20 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) } rpriv->netdev = netdev; - rpriv->rep = rep; rep->rep_if[REP_ETH].priv = rpriv; INIT_LIST_HEAD(&rpriv->vport_sqs_list); + if (rep->vport == FDB_UPLINK_VPORT) { + err = mlx5e_create_mdev_resources(dev); + if (err) + goto err_destroy_netdev; + } + err = mlx5e_attach_netdev(netdev_priv(netdev)); if (err) { pr_warn("Failed to attach representor netdev for vport %d\n", rep->vport); - goto err_destroy_netdev; + goto err_destroy_mdev_resources; } err = mlx5e_rep_neigh_init(rpriv); @@ -1323,32 +1743,25 @@ mlx5e_vport_rep_load(struct mlx5_core_dev *dev, struct mlx5_eswitch_rep *rep) goto err_detach_netdev; } - uplink_rpriv = mlx5_eswitch_get_uplink_priv(dev->priv.eswitch, REP_ETH); - upriv = netdev_priv(uplink_rpriv->netdev); - err = tc_setup_cb_egdev_register(netdev, mlx5e_rep_setup_tc_cb_egdev, - upriv); - if (err) - goto err_neigh_cleanup; - err = register_netdev(netdev); if (err) { pr_warn("Failed to register representor netdev for vport %d\n", rep->vport); - goto err_egdev_cleanup; + goto err_neigh_cleanup; } return 0; -err_egdev_cleanup: - tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev, - upriv); - err_neigh_cleanup: mlx5e_rep_neigh_cleanup(rpriv); err_detach_netdev: mlx5e_detach_netdev(netdev_priv(netdev)); +err_destroy_mdev_resources: + if (rep->vport == FDB_UPLINK_VPORT) + mlx5e_destroy_mdev_resources(dev); + err_destroy_netdev: mlx5e_destroy_netdev(netdev_priv(netdev)); kfree(rpriv); @@ -1361,18 +1774,13 @@ mlx5e_vport_rep_unload(struct mlx5_eswitch_rep *rep) struct mlx5e_rep_priv *rpriv = mlx5e_rep_to_rep_priv(rep); struct net_device *netdev = rpriv->netdev; struct mlx5e_priv *priv = netdev_priv(netdev); - struct mlx5e_rep_priv *uplink_rpriv; void *ppriv = priv->ppriv; - struct mlx5e_priv *upriv; unregister_netdev(netdev); - uplink_rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, - REP_ETH); - upriv = netdev_priv(uplink_rpriv->netdev); - tc_setup_cb_egdev_unregister(netdev, mlx5e_rep_setup_tc_cb_egdev, - upriv); mlx5e_rep_neigh_cleanup(rpriv); mlx5e_detach_netdev(priv); + if (rep->vport == FDB_UPLINK_VPORT) + mlx5e_destroy_mdev_resources(priv->mdev); mlx5e_destroy_netdev(priv); kfree(ppriv); /* mlx5e_rep_priv */ } @@ -1386,14 +1794,13 @@ static void *mlx5e_vport_rep_get_proto_dev(struct mlx5_eswitch_rep *rep) return rpriv->netdev; } -static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) +void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev) { - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; + struct mlx5_eswitch *esw = mdev->priv.eswitch; int total_vfs = MLX5_TOTAL_VPORTS(mdev); int vport; - for (vport = 1; vport < total_vfs; vport++) { + for (vport = 0; vport < total_vfs; vport++) { struct mlx5_eswitch_rep_if rep_if = {}; rep_if.load = 
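
/*
 * mlx5e_vport_rep_load() above picks one of two const profile vtables by
 * vport: the uplink representor gets the richer uplink profile, while VF
 * reps keep the slim one.  A stand-alone analogue of that table-driven
 * choice; the names and the UPLINK_VPORT value are stand-ins.
 */
#include <stdio.h>

struct profile {
	const char *name;
	int max_tc;
};

static const struct profile vf_profile = { .name = "vf", .max_tc = 1 };
static const struct profile uplink_profile = { .name = "uplink", .max_tc = 8 };

#define UPLINK_VPORT 0xffff		/* stand-in for FDB_UPLINK_VPORT */

static const struct profile *pick_profile(int vport)
{
	return vport == UPLINK_VPORT ? &uplink_profile : &vf_profile;
}

int main(void)
{
	printf("%s\n", pick_profile(UPLINK_VPORT)->name);	/* uplink */
	printf("%s\n", pick_profile(3)->name);			/* vf */
	return 0;
}
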
mlx5e_vport_rep_load; @@ -1403,55 +1810,12 @@ static void mlx5e_rep_register_vf_vports(struct mlx5e_priv *priv) } } -static void mlx5e_rep_unregister_vf_vports(struct mlx5e_priv *priv) +void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev) { - struct mlx5_core_dev *mdev = priv->mdev; struct mlx5_eswitch *esw = mdev->priv.eswitch; int total_vfs = MLX5_TOTAL_VPORTS(mdev); int vport; - for (vport = 1; vport < total_vfs; vport++) + for (vport = total_vfs - 1; vport >= 0; vport--) mlx5_eswitch_unregister_vport_rep(esw, vport, REP_ETH); } - -void mlx5e_register_vport_reps(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5_eswitch_rep_if rep_if; - struct mlx5e_rep_priv *rpriv; - - rpriv = priv->ppriv; - rpriv->netdev = priv->netdev; - - rep_if.load = mlx5e_nic_rep_load; - rep_if.unload = mlx5e_nic_rep_unload; - rep_if.get_proto_dev = mlx5e_vport_rep_get_proto_dev; - rep_if.priv = rpriv; - INIT_LIST_HEAD(&rpriv->vport_sqs_list); - mlx5_eswitch_register_vport_rep(esw, 0, &rep_if, REP_ETH); /* UPLINK PF vport*/ - - mlx5e_rep_register_vf_vports(priv); /* VFs vports */ -} - -void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) -{ - struct mlx5_core_dev *mdev = priv->mdev; - struct mlx5_eswitch *esw = mdev->priv.eswitch; - - mlx5e_rep_unregister_vf_vports(priv); /* VFs vports */ - mlx5_eswitch_unregister_vport_rep(esw, 0, REP_ETH); /* UPLINK PF*/ -} - -void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev) -{ - struct mlx5_eswitch *esw = mdev->priv.eswitch; - struct mlx5e_rep_priv *rpriv; - - rpriv = kzalloc(sizeof(*rpriv), GFP_KERNEL); - if (!rpriv) - return NULL; - - rpriv->rep = &esw->offloads.vport_reps[0]; - return rpriv; -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h index 844d32d5c29f..edd722824697 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.h @@ -53,13 +53,33 @@ struct mlx5e_neigh_update_table { unsigned long min_interval; /* jiffies */ }; +struct mlx5_rep_uplink_priv { + /* Filters DB - instantiated by the uplink representor and shared by + * the uplink's VFs + */ + struct rhashtable tc_ht; + + /* indirect block callbacks are invoked on bind/unbind events + * on registered higher level devices (e.g. 
tunnel devices) + * + * tc_indr_block_priv_list is used to look up indirect callback + * private data + * + * netdevice_nb is the netdev events notifier - used to register + * tunnel devices for block events + * + */ + struct list_head tc_indr_block_priv_list; + struct notifier_block netdevice_nb; +}; + struct mlx5e_rep_priv { struct mlx5_eswitch_rep *rep; struct mlx5e_neigh_update_table neigh_update; struct net_device *netdev; struct mlx5_flow_handle *vport_rx_rule; struct list_head vport_sqs_list; - struct rhashtable tc_ht; /* valid for uplink rep */ + struct mlx5_rep_uplink_priv uplink_priv; /* valid for uplink rep */ }; static inline @@ -129,6 +149,8 @@ struct mlx5e_encap_entry { struct net_device *out_dev; int tunnel_type; + int tunnel_hlen; + int reformat_type; u8 flags; char *encap_header; int encap_size; @@ -140,16 +162,12 @@ struct mlx5e_rep_sq { }; void *mlx5e_alloc_nic_rep_priv(struct mlx5_core_dev *mdev); -void mlx5e_register_vport_reps(struct mlx5e_priv *priv); -void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv); +void mlx5e_rep_register_vport_reps(struct mlx5_core_dev *mdev); +void mlx5e_rep_unregister_vport_reps(struct mlx5_core_dev *mdev); bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv); int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv); void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv); -int mlx5e_get_offload_stats(int attr_id, const struct net_device *dev, void *sp); -bool mlx5e_has_offload_stats(const struct net_device *dev, int attr_id); - -int mlx5e_attr_get(struct net_device *dev, struct switchdev_attr *attr); void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe); int mlx5e_rep_encap_entry_attach(struct mlx5e_priv *priv, @@ -158,12 +176,17 @@ void mlx5e_rep_encap_entry_detach(struct mlx5e_priv *priv, struct mlx5e_encap_entry *e); void mlx5e_rep_queue_neigh_stats_work(struct mlx5e_priv *priv); + +bool mlx5e_eswitch_rep(struct net_device *netdev); + #else /* CONFIG_MLX5_ESWITCH */ -static inline void mlx5e_register_vport_reps(struct mlx5e_priv *priv) {} -static inline void mlx5e_unregister_vport_reps(struct mlx5e_priv *priv) {} static inline bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv) { return false; } static inline int mlx5e_add_sqs_fwd_rules(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_remove_sqs_fwd_rules(struct mlx5e_priv *priv) {} #endif +static inline bool mlx5e_is_vport_rep(struct mlx5e_priv *priv) +{ + return (MLX5_ESWITCH_MANAGER(priv->mdev) && priv->ppriv); +} #endif /* __MLX5E_REP_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 0b5ef6d4e815..1d0bb5ff8c26 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -554,9 +554,9 @@ static inline void mlx5e_poll_ico_single_cqe(struct mlx5e_cq *cq, mlx5_cqwq_pop(&cq->wq); - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_REQ)) { + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) { netdev_WARN_ONCE(cq->channel->netdev, - "Bad OP in ICOSQ CQE: 0x%x\n", cqe->op_own); + "Bad OP in ICOSQ CQE: 0x%x\n", get_cqe_opcode(cqe)); return; } @@ -898,7 +898,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, prefetchw(va); /* xdp_frame data area */ prefetch(data); - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { rq->stats->wqe_err++; return NULL; } @@ -930,7 +930,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5_cqe64 
*cqe, u16 byte_cnt = cqe_bcnt - headlen; struct sk_buff *skb; - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { rq->stats->wqe_err++; return NULL; } @@ -1154,7 +1154,7 @@ void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) wi->consumed_strides += cstrides; - if (unlikely((cqe->op_own >> 4) != MLX5_CQE_RESP_SEND)) { + if (unlikely(get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND)) { rq->stats->wqe_err++; goto mpwrq_cqe_out; } @@ -1190,7 +1190,6 @@ mpwrq_cqe_out: int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) { struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq); - struct mlx5e_xdpsq *xdpsq = &rq->xdpsq; struct mlx5_cqe64 *cqe; int work_done = 0; @@ -1221,15 +1220,8 @@ int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget) } while ((++work_done < budget) && (cqe = mlx5_cqwq_get_cqe(&cq->wq))); out: - if (xdpsq->doorbell) { - mlx5e_xmit_xdp_doorbell(xdpsq); - xdpsq->doorbell = false; - } - - if (xdpsq->redirect_flush) { - xdp_do_flush_map(); - xdpsq->redirect_flush = false; - } + if (rq->xdp_prog) + mlx5e_xdp_rx_poll_complete(rq); mlx5_cqwq_update_db_record(&cq->wq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index 4337afd610d7..d3fe48ff9da9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include "lib/mlx5.h" #include "en.h" #include "en_accel/ipsec.h" #include "en_accel/tls.h" @@ -480,7 +481,10 @@ static int mlx5e_grp_802_3_fill_stats(struct mlx5e_priv *priv, u64 *data, return idx; } -static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv) +#define MLX5_BASIC_PPCNT_SUPPORTED(mdev) \ + (MLX5_CAP_GEN(mdev, pcam_reg) ? 
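
/*
 * The en_rx.c hunks above replace every open-coded "cqe->op_own >> 4" with
 * get_cqe_opcode().  The accessor presumably has the shape below; this is
 * inferred from the call sites, not quoted from the mlx5 headers, and the
 * demo just exercises the nibble split.
 */
#include <stdint.h>
#include <stdio.h>

struct cqe { uint8_t op_own; };	/* opcode in the high nibble, owner bits low */

static inline uint8_t get_cqe_opcode(const struct cqe *cqe)
{
	return cqe->op_own >> 4;
}

int main(void)
{
	struct cqe c = { .op_own = 0x25 };	/* opcode 0x2, low bits 0x5 */

	printf("opcode=0x%x\n", get_cqe_opcode(&c));
	return 0;
}
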
MLX5_CAP_PCAM_REG(mdev, ppcnt) : 1) + +void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv) { struct mlx5e_pport_stats *pstats = &priv->stats.pport; struct mlx5_core_dev *mdev = priv->mdev; @@ -488,6 +492,9 @@ static void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv) int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); void *out; + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; + MLX5_SET(ppcnt_reg, in, local_port, 1); out = pstats->IEEE_802_3_counters; MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP); @@ -600,6 +607,9 @@ static void mlx5e_grp_2819_update_stats(struct mlx5e_priv *priv) int sz = MLX5_ST_SZ_BYTES(ppcnt_reg); void *out; + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; + MLX5_SET(ppcnt_reg, in, local_port, 1); out = pstats->RFC_2819_counters; MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP); @@ -934,7 +944,7 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = { }; static const struct counter_desc pport_pfc_stall_stats_desc[] = { - { "tx_pause_storm_warning_events ", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) }, + { "tx_pause_storm_warning_events", PPORT_PER_PRIO_OFF(device_stall_minor_watermark_cnt) }, { "tx_pause_storm_error_events", PPORT_PER_PRIO_OFF(device_stall_critical_watermark_cnt) }, }; @@ -1075,6 +1085,9 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv) int prio; void *out; + if (!MLX5_BASIC_PPCNT_SUPPORTED(mdev)) + return; + MLX5_SET(ppcnt_reg, in, local_port, 1); MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP); for (prio = 0; prio < NUM_PPORT_PRIO; prio++) { @@ -1086,13 +1099,13 @@ static void mlx5e_grp_per_prio_update_stats(struct mlx5e_priv *priv) } static const struct counter_desc mlx5e_pme_status_desc[] = { - { "module_unplug", 8 }, + { "module_unplug", sizeof(u64) * MLX5_MODULE_STATUS_UNPLUGGED }, }; static const struct counter_desc mlx5e_pme_error_desc[] = { - { "module_bus_stuck", 16 }, /* bus stuck (I2C or data shorted) */ - { "module_high_temp", 48 }, /* high temperature */ - { "module_bad_shorted", 56 }, /* bad or shorted cable/module */ + { "module_bus_stuck", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BUS_STUCK }, + { "module_high_temp", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE }, + { "module_bad_shorted", sizeof(u64) * MLX5_MODULE_EVENT_ERROR_BAD_CABLE }, }; #define NUM_PME_STATUS_STATS ARRAY_SIZE(mlx5e_pme_status_desc) @@ -1120,15 +1133,17 @@ static int mlx5e_grp_pme_fill_strings(struct mlx5e_priv *priv, u8 *data, static int mlx5e_grp_pme_fill_stats(struct mlx5e_priv *priv, u64 *data, int idx) { - struct mlx5_priv *mlx5_priv = &priv->mdev->priv; + struct mlx5_pme_stats pme_stats; int i; + mlx5_get_pme_stats(priv->mdev, &pme_stats); + for (i = 0; i < NUM_PME_STATUS_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.status_counters, + data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.status_counters, mlx5e_pme_status_desc, i); for (i = 0; i < NUM_PME_ERR_STATS; i++) - data[idx++] = MLX5E_READ_CTR64_CPU(mlx5_priv->pme_stats.error_counters, + data[idx++] = MLX5E_READ_CTR64_CPU(pme_stats.error_counters, mlx5e_pme_error_desc, i); return idx; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 3ff69ddae2d3..fe91ec06e3c7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -278,5 +278,6 @@ extern const struct mlx5e_stats_grp mlx5e_stats_grps[]; extern const int mlx5e_num_stats_grps; void 
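
/*
 * The module-event counter descriptors above stop hard-coding byte offsets
 * ("8", "16", "48") and derive them as sizeof(u64) times the event's enum
 * value, so a reordered enum can no longer silently skew the stats.  A
 * stand-alone analogue; the enum values here are stand-ins, not the
 * driver's.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum module_event {
	MODULE_BUS_STUCK = 2,
	MODULE_HIGH_TEMP = 6,
	MODULE_BAD_CABLE = 7,
};

struct counter_desc {
	const char *name;
	size_t offset;		/* byte offset into a u64 counter array */
};

static const struct counter_desc descs[] = {
	{ "module_bus_stuck", sizeof(uint64_t) * MODULE_BUS_STUCK },
	{ "module_high_temp", sizeof(uint64_t) * MODULE_HIGH_TEMP },
	{ "module_bad_cable", sizeof(uint64_t) * MODULE_BAD_CABLE },
};

int main(void)
{
	uint64_t counters[16] = { 0 };
	size_t i;

	counters[MODULE_HIGH_TEMP] = 3;
	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		printf("%s = %llu\n", descs[i].name,
		       (unsigned long long)*(const uint64_t *)
				((const char *)counters + descs[i].offset));
	return 0;
}
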
mlx5e_grp_sw_update_stats(struct mlx5e_priv *priv); +void mlx5e_grp_802_3_update_stats(struct mlx5e_priv *priv); #endif /* __MLX5_EN_STATS_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 9dabe9d4b279..cae6c6d48984 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -44,15 +44,15 @@ #include <net/tc_act/tc_tunnel_key.h> #include <net/tc_act/tc_pedit.h> #include <net/tc_act/tc_csum.h> -#include <net/vxlan.h> #include <net/arp.h> #include "en.h" #include "en_rep.h" #include "en_tc.h" #include "eswitch.h" -#include "lib/vxlan.h" #include "fs_core.h" #include "en/port.h" +#include "en/tc_tun.h" +#include "lib/devcom.h" struct mlx5_nic_flow_attr { u32 action; @@ -69,25 +69,54 @@ struct mlx5_nic_flow_attr { enum { MLX5E_TC_FLOW_INGRESS = MLX5E_TC_INGRESS, MLX5E_TC_FLOW_EGRESS = MLX5E_TC_EGRESS, - MLX5E_TC_FLOW_ESWITCH = BIT(MLX5E_TC_FLOW_BASE), - MLX5E_TC_FLOW_NIC = BIT(MLX5E_TC_FLOW_BASE + 1), - MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE + 2), - MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 3), - MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 4), - MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 5), + MLX5E_TC_FLOW_ESWITCH = MLX5E_TC_ESW_OFFLOAD, + MLX5E_TC_FLOW_NIC = MLX5E_TC_NIC_OFFLOAD, + MLX5E_TC_FLOW_OFFLOADED = BIT(MLX5E_TC_FLOW_BASE), + MLX5E_TC_FLOW_HAIRPIN = BIT(MLX5E_TC_FLOW_BASE + 1), + MLX5E_TC_FLOW_HAIRPIN_RSS = BIT(MLX5E_TC_FLOW_BASE + 2), + MLX5E_TC_FLOW_SLOW = BIT(MLX5E_TC_FLOW_BASE + 3), + MLX5E_TC_FLOW_DUP = BIT(MLX5E_TC_FLOW_BASE + 4), }; #define MLX5E_TC_MAX_SPLITS 1 +/* Helper struct for accessing a struct containing a list_head array. + * Containing struct + * |- Helper array + * [0] Helper item 0 + * |- list_head item 0 + * |- index (0) + * [1] Helper item 1 + * |- list_head item 1 + * |- index (1) + * To access the containing struct from one of the list_head items: + * 1. Get the helper item from the list_head item using + * helper item = + * container_of(list_head item, helper struct type, list_head field) + * 2. Get the containing struct from the helper item and its index in the array: + * containing struct = + * container_of(helper item, containing struct type, helper field[index]) + */ +struct encap_flow_item { + struct list_head list; + int index; +}; + struct mlx5e_tc_flow { struct rhash_head node; struct mlx5e_priv *priv; u64 cookie; u16 flags; struct mlx5_flow_handle *rule[MLX5E_TC_MAX_SPLITS + 1]; - struct list_head encap; /* flows sharing the same encap ID */ + /* Flow can be associated with multiple encap IDs. + * The number of encaps is bounded by the number of supported + * destinations. 
+ */ + struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; + struct mlx5e_tc_flow *peer_flow; struct list_head mod_hdr; /* flows sharing the same mod hdr ID */ struct list_head hairpin; /* flows sharing the same hairpin */ + struct list_head peer; /* flows with peer flow */ union { struct mlx5_esw_flow_attr esw_attr[0]; struct mlx5_nic_flow_attr nic_attr[0]; @@ -95,11 +124,12 @@ struct mlx5e_tc_flow { }; struct mlx5e_tc_flow_parse_attr { - struct ip_tunnel_info tun_info; + struct ip_tunnel_info tun_info[MLX5_MAX_FLOW_FWD_VPORTS]; + struct net_device *filter_dev; struct mlx5_flow_spec spec; int num_mod_hdr_actions; void *mod_hdr_actions; - int mirred_ifindex; + int mirred_ifindex[MLX5_MAX_FLOW_FWD_VPORTS]; }; #define MLX5E_TC_TABLE_NUM_GROUPS 4 @@ -316,7 +346,7 @@ static void mlx5e_hairpin_fill_rqt_rqns(struct mlx5e_hairpin *hp, void *rqtc) for (i = 0; i < sz; i++) { ix = i; - if (priv->channels.params.rss_hfunc == ETH_RSS_HASH_XOR) + if (priv->rss_params.hfunc == ETH_RSS_HASH_XOR) ix = mlx5e_bits_invert(i, ilog2(sz)); ix = indirection_rqt[ix]; rqn = hp->pair->rqn[ix]; @@ -360,13 +390,15 @@ static int mlx5e_hairpin_create_indirect_tirs(struct mlx5e_hairpin *hp) void *tirc; for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) { + struct mlx5e_tirc_config ttconfig = mlx5e_tirc_get_default_config(tt); + memset(in, 0, MLX5_ST_SZ_BYTES(create_tir_in)); tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); MLX5_SET(tirc, tirc, transport_domain, hp->tdn); MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT); MLX5_SET(tirc, tirc, indirect_table, hp->indir_rqt.rqtn); - mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false); + mlx5e_build_indir_tir_ctx_hash(&priv->rss_params, &ttconfig, tirc, false); err = mlx5_core_create_tir(hp->func_mdev, in, MLX5_ST_SZ_BYTES(create_tir_in), &hp->indir_tirn[tt]); @@ -569,7 +601,7 @@ static int mlx5e_hairpin_flow_add(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr, struct netlink_ext_ack *extack) { - int peer_ifindex = parse_attr->mirred_ifindex; + int peer_ifindex = parse_attr->mirred_ifindex[0]; struct mlx5_hairpin_params params; struct mlx5_core_dev *peer_mdev; struct mlx5e_hairpin_entry *hpe; @@ -802,7 +834,7 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, mlx5_del_flow_rules(flow->rule[0]); mlx5_fc_destroy(priv->mdev, counter); - if (!mlx5e_tc_num_filters(priv) && priv->fs.tc.t) { + if (!mlx5e_tc_num_filters(priv, MLX5E_TC_NIC_OFFLOAD) && priv->fs.tc.t) { mlx5_destroy_flow_table(priv->fs.tc.t); priv->fs.tc.t = NULL; } @@ -815,14 +847,15 @@ static void mlx5e_tc_del_nic_flow(struct mlx5e_priv *priv, } static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow); + struct mlx5e_tc_flow *flow, int out_index); static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack); + struct netlink_ext_ack *extack, + int out_index); static struct mlx5_flow_handle * mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, @@ -836,7 +869,7 @@ mlx5e_tc_offload_fdb_rules(struct mlx5_eswitch *esw, if (IS_ERR(rule)) return rule; - if (attr->mirror_count) { + if (attr->split_count) { flow->rule[1] = mlx5_eswitch_add_fwd_rule(esw, spec, attr); if (IS_ERR(flow->rule[1])) { mlx5_eswitch_del_offloaded_rule(esw, rule, attr); @@ -855,7 +888,7 @@ mlx5e_tc_unoffload_fdb_rules(struct mlx5_eswitch *esw, { flow->flags &= ~MLX5E_TC_FLOW_OFFLOADED; - if (attr->mirror_count) + 
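
/*
 * The encap_flow_item comment above describes a two-hop container_of:
 * list_head to helper item, then helper item plus its stored index back to
 * the containing flow.  A runnable user-space rendering of exactly that
 * walk; the struct names are stand-ins.  Note that offsetof() with a
 * variable array index is a GCC/Clang extension, the same one the kernel
 * code relies on here.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define MAX_DESTS 2		/* stand-in for MLX5_MAX_FLOW_FWD_VPORTS */

struct list_head { struct list_head *next, *prev; };

struct encap_item {
	struct list_head list;
	int index;		/* this item's slot in the containing array */
};

struct flow {
	int cookie;
	struct encap_item encaps[MAX_DESTS];
};

static struct flow *flow_from_list(struct list_head *lh)
{
	/* hop 1: list_head -> helper item */
	struct encap_item *item = container_of(lh, struct encap_item, list);

	/* hop 2: helper item -> containing flow, via the indexed slot */
	return container_of(item, struct flow, encaps[item->index]);
}

int main(void)
{
	struct flow f = { .cookie = 42 };
	int i;

	for (i = 0; i < MAX_DESTS; i++)
		f.encaps[i].index = i;

	printf("%d\n", flow_from_list(&f.encaps[1].list)->cookie);	/* 42 */
	return 0;
}
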
if (attr->split_count) mlx5_eswitch_del_fwd_rule(esw, flow->rule[1], attr); mlx5_eswitch_del_offloaded_rule(esw, flow->rule[0], attr); @@ -871,7 +904,7 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - slow_attr->mirror_count = 0; + slow_attr->split_count = 0; slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); @@ -888,7 +921,7 @@ mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, { memcpy(slow_attr, flow->esw_attr, sizeof(*slow_attr)); slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - slow_attr->mirror_count = 0; + slow_attr->split_count = 0; slow_attr->dest_chain = FDB_SLOW_PATH_CHAIN; mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); flow->flags &= ~MLX5E_TC_FLOW_SLOW; @@ -909,6 +942,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, struct mlx5e_rep_priv *rpriv; struct mlx5e_priv *out_priv; int err = 0, encap_err = 0; + int out_index; if (!mlx5_eswitch_prios_supported(esw) && attr->prio != 1) { NL_SET_ERR_MSG(extack, "E-switch priorities unsupported, upgrade FW"); @@ -927,20 +961,27 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, goto err_max_prio_chain; } - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) { + int mirred_ifindex; + + if (!(attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP)) + continue; + + mirred_ifindex = attr->parse_attr->mirred_ifindex[out_index]; out_dev = __dev_get_by_index(dev_net(priv->netdev), - attr->parse_attr->mirred_ifindex); - encap_err = mlx5e_attach_encap(priv, &parse_attr->tun_info, - out_dev, &encap_dev, flow, - extack); - if (encap_err && encap_err != -EAGAIN) { - err = encap_err; + mirred_ifindex); + err = mlx5e_attach_encap(priv, + &parse_attr->tun_info[out_index], + out_dev, &encap_dev, flow, + extack, out_index); + if (err && err != -EAGAIN) goto err_attach_encap; - } + if (err == -EAGAIN) + encap_err = err; out_priv = netdev_priv(encap_dev); rpriv = out_priv->ppriv; - attr->out_rep[attr->out_count] = rpriv->rep; - attr->out_mdev[attr->out_count++] = out_priv->mdev; + attr->dests[out_index].rep = rpriv->rep; + attr->dests[out_index].mdev = out_priv->mdev; } err = mlx5_eswitch_add_vlan_action(esw, attr); @@ -955,7 +996,7 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, } if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) { - counter = mlx5_fc_create(esw->dev, true); + counter = mlx5_fc_create(attr->counter_dev, true); if (IS_ERR(counter)) { err = PTR_ERR(counter); goto err_create_counter; @@ -984,15 +1025,16 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, return 0; err_add_rule: - mlx5_fc_destroy(esw->dev, counter); + mlx5_fc_destroy(attr->counter_dev, counter); err_create_counter: if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); err_mod_hdr: mlx5_eswitch_del_vlan_action(esw, attr); err_add_vlan: - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) - mlx5e_detach_encap(priv, flow); + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) + if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) + mlx5e_detach_encap(priv, flow, out_index); err_attach_encap: err_max_prio_chain: return err; @@ -1004,6 +1046,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct mlx5_esw_flow_attr 
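
/*
 * The destination loop in mlx5e_tc_add_fdb_flow() above treats -EAGAIN
 * from encap attach as a soft failure: the flow is still installed, just
 * via the slow path until the neighbour resolves, while any other error
 * aborts the whole attach.  A stand-alone sketch of that
 * tolerate-one-errno pattern:
 */
#include <errno.h>
#include <stdio.h>

/* stand-in: 0 on success, -EAGAIN when not ready yet, else a hard error */
static int attach_encap(int i)
{
	return i == 1 ? -EAGAIN : 0;
}

static int attach_all(int n, int *any_pending)
{
	int i;

	*any_pending = 0;
	for (i = 0; i < n; i++) {
		int err = attach_encap(i);

		if (err == -EAGAIN) {	/* soft: finish, use slow path later */
			*any_pending = 1;
			continue;
		}
		if (err)
			return err;	/* hard: abort the attach */
	}
	return 0;
}

int main(void)
{
	int pending;
	int err = attach_all(3, &pending);

	printf("err=%d pending=%d\n", err, pending);
	return 0;
}
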
slow_attr; + int out_index; if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { if (flow->flags & MLX5E_TC_FLOW_SLOW) @@ -1014,16 +1057,16 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, mlx5_eswitch_del_vlan_action(esw, attr); - if (attr->action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) { - mlx5e_detach_encap(priv, flow); - kvfree(attr->parse_attr); - } + for (out_index = 0; out_index < MLX5_MAX_FLOW_FWD_VPORTS; out_index++) + if (attr->dests[out_index].flags & MLX5_ESW_DEST_ENCAP) + mlx5e_detach_encap(priv, flow, out_index); + kvfree(attr->parse_attr); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); if (attr->action & MLX5_FLOW_CONTEXT_ACTION_COUNT) - mlx5_fc_destroy(esw->dev, attr->counter); + mlx5_fc_destroy(attr->counter_dev, attr->counter); } void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, @@ -1033,10 +1076,12 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr slow_attr, *esw_attr; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; + struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err; - err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type, + err = mlx5_packet_reformat_alloc(priv->mdev, + e->reformat_type, e->encap_size, e->encap_header, MLX5_FLOW_NAMESPACE_FDB, &e->encap_id); @@ -1048,11 +1093,31 @@ void mlx5e_tc_encap_flows_add(struct mlx5e_priv *priv, e->flags |= MLX5_ENCAP_ENTRY_VALID; mlx5e_rep_queue_neigh_stats_work(priv); - list_for_each_entry(flow, &e->flows, encap) { + list_for_each_entry(efi, &e->flows, list) { + bool all_flow_encaps_valid = true; + int i; + + flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); esw_attr = flow->esw_attr; - esw_attr->encap_id = e->encap_id; spec = &esw_attr->parse_attr->spec; + esw_attr->dests[efi->index].encap_id = e->encap_id; + esw_attr->dests[efi->index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + /* Flow can be associated with multiple encap entries. + * Before offloading the flow verify that all of them have + * a valid neighbour. 
+ */ + for (i = 0; i < MLX5_MAX_FLOW_FWD_VPORTS; i++) { + if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP)) + continue; + if (!(esw_attr->dests[i].flags & MLX5_ESW_DEST_ENCAP_VALID)) { + all_flow_encaps_valid = false; + break; + } + } + /* Do not offload flows with unresolved neighbors */ + if (!all_flow_encaps_valid) + continue; /* update from slow path rule to encap rule */ rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, esw_attr); if (IS_ERR(rule)) { @@ -1075,14 +1140,18 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5_esw_flow_attr slow_attr; struct mlx5_flow_handle *rule; struct mlx5_flow_spec *spec; + struct encap_flow_item *efi; struct mlx5e_tc_flow *flow; int err; - list_for_each_entry(flow, &e->flows, encap) { + list_for_each_entry(efi, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, encaps[efi->index]); spec = &flow->esw_attr->parse_attr->spec; /* update from encap rule to slow path rule */ rule = mlx5e_tc_offload_to_slow_path(esw, flow, spec, &slow_attr); + /* mark the flow's encap dest as non-valid */ + flow->esw_attr->dests[efi->index].flags &= ~MLX5_ESW_DEST_ENCAP_VALID; if (IS_ERR(rule)) { err = PTR_ERR(rule); @@ -1130,9 +1199,12 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) return; list_for_each_entry(e, &nhe->encap_list, encap_list) { + struct encap_flow_item *efi; if (!(e->flags & MLX5_ENCAP_ENTRY_VALID)) continue; - list_for_each_entry(flow, &e->flows, encap) { + list_for_each_entry(efi, &e->flows, list) { + flow = container_of(efi, struct mlx5e_tc_flow, + encaps[efi->index]); if (flow->flags & MLX5E_TC_FLOW_OFFLOADED) { counter = mlx5e_tc_get_counter(flow); mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); @@ -1162,11 +1234,11 @@ void mlx5e_tc_update_neigh_used_value(struct mlx5e_neigh_hash_entry *nhe) } static void mlx5e_detach_encap(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) + struct mlx5e_tc_flow *flow, int out_index) { - struct list_head *next = flow->encap.next; + struct list_head *next = flow->encaps[out_index].list.next; - list_del(&flow->encap); + list_del(&flow->encaps[out_index].list); if (list_empty(next)) { struct mlx5e_encap_entry *e; @@ -1182,49 +1254,55 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv, } } -static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, - struct mlx5e_tc_flow *flow) +static void __mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { - if (flow->flags & MLX5E_TC_FLOW_ESWITCH) - mlx5e_tc_del_fdb_flow(priv, flow); - else - mlx5e_tc_del_nic_flow(priv, flow); + struct mlx5_eswitch *esw = flow->priv->mdev->priv.eswitch; + + if (!(flow->flags & MLX5E_TC_FLOW_ESWITCH) || + !(flow->flags & MLX5E_TC_FLOW_DUP)) + return; + + mutex_lock(&esw->offloads.peer_mutex); + list_del(&flow->peer); + mutex_unlock(&esw->offloads.peer_mutex); + + flow->flags &= ~MLX5E_TC_FLOW_DUP; + + mlx5e_tc_del_fdb_flow(flow->peer_flow->priv, flow->peer_flow); + kvfree(flow->peer_flow); + flow->peer_flow = NULL; } -static void parse_vxlan_attr(struct mlx5_flow_spec *spec, - struct tc_cls_flower_offload *f) +static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow) { - void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - outer_headers); - void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, - outer_headers); - void *misc_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, - misc_parameters); - void *misc_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, - misc_parameters); + struct mlx5_core_dev *dev = 
flow->priv->mdev; + struct mlx5_devcom *devcom = dev->priv.devcom; + struct mlx5_eswitch *peer_esw; - MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ip_protocol); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, ip_protocol, IPPROTO_UDP); + peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!peer_esw) + return; - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) { - struct flow_dissector_key_keyid *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->key); - struct flow_dissector_key_keyid *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_KEYID, - f->mask); - MLX5_SET(fte_match_set_misc, misc_c, vxlan_vni, - be32_to_cpu(mask->keyid)); - MLX5_SET(fte_match_set_misc, misc_v, vxlan_vni, - be32_to_cpu(key->keyid)); + __mlx5e_tc_del_fdb_peer_flow(flow); + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); +} + +static void mlx5e_tc_del_flow(struct mlx5e_priv *priv, + struct mlx5e_tc_flow *flow) +{ + if (flow->flags & MLX5E_TC_FLOW_ESWITCH) { + mlx5e_tc_del_fdb_peer_flow(flow); + mlx5e_tc_del_fdb_flow(priv, flow); + } else { + mlx5e_tc_del_nic_flow(priv, flow); } } + static int parse_tunnel_attr(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, + struct net_device *filter_dev) { struct netlink_ext_ack *extack = f->common.extack; void *headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, @@ -1236,48 +1314,14 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv, skb_flow_dissector_target(f->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL, f->key); + int err = 0; - if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) { - struct flow_dissector_key_ports *key = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - f->key); - struct flow_dissector_key_ports *mask = - skb_flow_dissector_target(f->dissector, - FLOW_DISSECTOR_KEY_ENC_PORTS, - f->mask); - - /* Full udp dst port must be given */ - if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst))) - goto vxlan_match_offload_err; - - if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->dst)) && - MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) - parse_vxlan_attr(spec, f); - else { - NL_SET_ERR_MSG_MOD(extack, - "port isn't an offloaded vxlan udp dport"); - netdev_warn(priv->netdev, - "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst)); - return -EOPNOTSUPP; - } - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, - udp_dport, ntohs(mask->dst)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - udp_dport, ntohs(key->dst)); - - MLX5_SET(fte_match_set_lyr_2_4, headers_c, - udp_sport, ntohs(mask->src)); - MLX5_SET(fte_match_set_lyr_2_4, headers_v, - udp_sport, ntohs(key->src)); - } else { /* udp dst port must be given */ -vxlan_match_offload_err: + err = mlx5e_tc_tun_parse(filter_dev, priv, spec, f, + headers_c, headers_v); + if (err) { NL_SET_ERR_MSG_MOD(extack, - "IP tunnel decap offload supported only for vxlan, must set UDP dport"); - netdev_warn(priv->netdev, - "IP tunnel decap offload supported only for vxlan, must set UDP dport\n"); - return -EOPNOTSUPP; + "failed to parse tunnel attributes"); + return err; } if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) { @@ -1381,6 +1425,7 @@ vxlan_match_offload_err: static int __parse_cls_flower(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec, struct tc_cls_flower_offload *f, + struct net_device *filter_dev, u8 *match_level) { struct netlink_ext_ack 
*extack = f->common.extack; @@ -1432,7 +1477,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, switch (key->addr_type) { case FLOW_DISSECTOR_KEY_IPV4_ADDRS: case FLOW_DISSECTOR_KEY_IPV6_ADDRS: - if (parse_tunnel_attr(priv, spec, f)) + if (parse_tunnel_attr(priv, spec, f, filter_dev)) return -EOPNOTSUPP; break; default: @@ -1774,7 +1819,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv, static int parse_cls_flower(struct mlx5e_priv *priv, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec, - struct tc_cls_flower_offload *f) + struct tc_cls_flower_offload *f, + struct net_device *filter_dev) { struct netlink_ext_ack *extack = f->common.extack; struct mlx5_core_dev *dev = priv->mdev; @@ -1784,7 +1830,7 @@ static int parse_cls_flower(struct mlx5e_priv *priv, u8 match_level; int err; - err = __parse_cls_flower(priv, spec, f, &match_level); + err = __parse_cls_flower(priv, spec, f, filter_dev, &match_level); if (!err && (flow->flags & MLX5E_TC_FLOW_ESWITCH)) { rep = rpriv->rep; @@ -2137,7 +2183,6 @@ static bool modify_header_match_supported(struct mlx5_flow_spec *spec, { const struct tc_action *a; bool modify_ip_header; - LIST_HEAD(actions); u8 htype, ip_proto; void *headers_v; u16 ethertype; @@ -2226,7 +2271,6 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, { struct mlx5_nic_flow_attr *attr = flow->nic_attr; const struct tc_action *a; - LIST_HEAD(actions); u32 action = 0; int err, i; @@ -2269,7 +2313,7 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (priv->netdev->netdev_ops == peer_dev->netdev_ops && same_hw_devs(priv, netdev_priv(peer_dev))) { - parse_attr->mirred_ifindex = peer_dev->ifindex; + parse_attr->mirred_ifindex[0] = peer_dev->ifindex; flow->flags |= MLX5E_TC_FLOW_HAIRPIN; action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_COUNT; @@ -2318,45 +2362,6 @@ static inline int hash_encap_info(struct ip_tunnel_key *key) return jhash(key, sizeof(*key), 0); } -static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct net_device **out_dev, - struct flowi4 *fl4, - struct neighbour **out_n, - u8 *out_ttl) -{ - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - struct mlx5e_rep_priv *uplink_rpriv; - struct rtable *rt; - struct neighbour *n = NULL; - -#if IS_ENABLED(CONFIG_INET) - int ret; - - rt = ip_route_output_key(dev_net(mirred_dev), fl4); - ret = PTR_ERR_OR_ZERO(rt); - if (ret) - return ret; -#else - return -EOPNOTSUPP; -#endif - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - /* if the egress device isn't on the same HW e-switch, we use the uplink */ - if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) - *out_dev = uplink_rpriv->netdev; - else - *out_dev = rt->dst.dev; - - if (!(*out_ttl)) - *out_ttl = ip4_dst_hoplimit(&rt->dst); - n = dst_neigh_lookup(&rt->dst, &fl4->daddr); - ip_rt_put(rt); - if (!n) - return -ENOMEM; - - *out_n = n; - return 0; -} static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, struct net_device *peer_netdev) @@ -2372,377 +2377,24 @@ static bool is_merged_eswitch_dev(struct mlx5e_priv *priv, (peer_priv->mdev->priv.eswitch->mode == SRIOV_OFFLOADS)); } -static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct net_device **out_dev, - struct flowi6 *fl6, - struct neighbour **out_n, - u8 *out_ttl) -{ - struct neighbour *n = NULL; - struct dst_entry *dst; - -#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6) - struct mlx5e_rep_priv 
*uplink_rpriv; - struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; - int ret; - - ret = ipv6_stub->ipv6_dst_lookup(dev_net(mirred_dev), NULL, &dst, - fl6); - if (ret < 0) - return ret; - - if (!(*out_ttl)) - *out_ttl = ip6_dst_hoplimit(dst); - - uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - /* if the egress device isn't on the same HW e-switch, we use the uplink */ - if (!switchdev_port_same_parent_id(priv->netdev, dst->dev)) - *out_dev = uplink_rpriv->netdev; - else - *out_dev = dst->dev; -#else - return -EOPNOTSUPP; -#endif - - n = dst_neigh_lookup(dst, &fl6->daddr); - dst_release(dst); - if (!n) - return -ENOMEM; - - *out_n = n; - return 0; -} - -static void gen_vxlan_header_ipv4(struct net_device *out_dev, - char buf[], int encap_size, - unsigned char h_dest[ETH_ALEN], - u8 tos, u8 ttl, - __be32 daddr, - __be32 saddr, - __be16 udp_dst_port, - __be32 vx_vni) -{ - struct ethhdr *eth = (struct ethhdr *)buf; - struct iphdr *ip = (struct iphdr *)((char *)eth + sizeof(struct ethhdr)); - struct udphdr *udp = (struct udphdr *)((char *)ip + sizeof(struct iphdr)); - struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); - - memset(buf, 0, encap_size); - - ether_addr_copy(eth->h_dest, h_dest); - ether_addr_copy(eth->h_source, out_dev->dev_addr); - eth->h_proto = htons(ETH_P_IP); - - ip->daddr = daddr; - ip->saddr = saddr; - - ip->tos = tos; - ip->ttl = ttl; - ip->protocol = IPPROTO_UDP; - ip->version = 0x4; - ip->ihl = 0x5; - - udp->dest = udp_dst_port; - vxh->vx_flags = VXLAN_HF_VNI; - vxh->vx_vni = vxlan_vni_field(vx_vni); -} - -static void gen_vxlan_header_ipv6(struct net_device *out_dev, - char buf[], int encap_size, - unsigned char h_dest[ETH_ALEN], - u8 tos, u8 ttl, - struct in6_addr *daddr, - struct in6_addr *saddr, - __be16 udp_dst_port, - __be32 vx_vni) -{ - struct ethhdr *eth = (struct ethhdr *)buf; - struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr)); - struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr)); - struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr)); - - memset(buf, 0, encap_size); - - ether_addr_copy(eth->h_dest, h_dest); - ether_addr_copy(eth->h_source, out_dev->dev_addr); - eth->h_proto = htons(ETH_P_IPV6); - - ip6_flow_hdr(ip6h, tos, 0); - /* the HW fills up ipv6 payload len */ - ip6h->nexthdr = IPPROTO_UDP; - ip6h->hop_limit = ttl; - ip6h->daddr = *daddr; - ip6h->saddr = *saddr; - - udp->dest = udp_dst_port; - vxh->vx_flags = VXLAN_HF_VNI; - vxh->vx_vni = vxlan_vni_field(vx_vni); -} - -static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct mlx5e_encap_entry *e) -{ - int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); - int ipv4_encap_size = ETH_HLEN + sizeof(struct iphdr) + VXLAN_HLEN; - struct ip_tunnel_key *tun_key = &e->tun_info.key; - struct net_device *out_dev; - struct neighbour *n = NULL; - struct flowi4 fl4 = {}; - u8 nud_state, tos, ttl; - char *encap_header; - int err; - if (max_encap_size < ipv4_encap_size) { - mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", - ipv4_encap_size, max_encap_size); - return -EOPNOTSUPP; - } - - encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL); - if (!encap_header) - return -ENOMEM; - - switch (e->tunnel_type) { - case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: - fl4.flowi4_proto = IPPROTO_UDP; - fl4.fl4_dport = tun_key->tp_dst; - break; - default: - err = -EOPNOTSUPP; - goto free_encap; - } - - tos = tun_key->tos; - ttl 
= tun_key->ttl; - - fl4.flowi4_tos = tun_key->tos; - fl4.daddr = tun_key->u.ipv4.dst; - fl4.saddr = tun_key->u.ipv4.src; - - err = mlx5e_route_lookup_ipv4(priv, mirred_dev, &out_dev, - &fl4, &n, &ttl); - if (err) - goto free_encap; - - /* used by mlx5e_detach_encap to lookup a neigh hash table - * entry in the neigh hash table when a user deletes a rule - */ - e->m_neigh.dev = n->dev; - e->m_neigh.family = n->ops->family; - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - e->out_dev = out_dev; - - /* It's importent to add the neigh to the hash table before checking - * the neigh validity state. So if we'll get a notification, in case the - * neigh changes it's validity state, we would find the relevant neigh - * in the hash. - */ - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); - if (err) - goto free_encap; - - read_lock_bh(&n->lock); - nud_state = n->nud_state; - ether_addr_copy(e->h_dest, n->ha); - read_unlock_bh(&n->lock); - - switch (e->tunnel_type) { - case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: - gen_vxlan_header_ipv4(out_dev, encap_header, - ipv4_encap_size, e->h_dest, tos, ttl, - fl4.daddr, - fl4.saddr, tun_key->tp_dst, - tunnel_id_to_key32(tun_key->tun_id)); - break; - default: - err = -EOPNOTSUPP; - goto destroy_neigh_entry; - } - e->encap_size = ipv4_encap_size; - e->encap_header = encap_header; - - if (!(nud_state & NUD_VALID)) { - neigh_event_send(n, NULL); - err = -EAGAIN; - goto out; - } - - err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type, - ipv4_encap_size, encap_header, - MLX5_FLOW_NAMESPACE_FDB, - &e->encap_id); - if (err) - goto destroy_neigh_entry; - - e->flags |= MLX5_ENCAP_ENTRY_VALID; - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); - neigh_release(n); - return err; - -destroy_neigh_entry: - mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); -free_encap: - kfree(encap_header); -out: - if (n) - neigh_release(n); - return err; -} - -static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv, - struct net_device *mirred_dev, - struct mlx5e_encap_entry *e) -{ - int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size); - int ipv6_encap_size = ETH_HLEN + sizeof(struct ipv6hdr) + VXLAN_HLEN; - struct ip_tunnel_key *tun_key = &e->tun_info.key; - struct net_device *out_dev; - struct neighbour *n = NULL; - struct flowi6 fl6 = {}; - u8 nud_state, tos, ttl; - char *encap_header; - int err; - - if (max_encap_size < ipv6_encap_size) { - mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n", - ipv6_encap_size, max_encap_size); - return -EOPNOTSUPP; - } - - encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL); - if (!encap_header) - return -ENOMEM; - - switch (e->tunnel_type) { - case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: - fl6.flowi6_proto = IPPROTO_UDP; - fl6.fl6_dport = tun_key->tp_dst; - break; - default: - err = -EOPNOTSUPP; - goto free_encap; - } - - tos = tun_key->tos; - ttl = tun_key->ttl; - - fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label); - fl6.daddr = tun_key->u.ipv6.dst; - fl6.saddr = tun_key->u.ipv6.src; - - err = mlx5e_route_lookup_ipv6(priv, mirred_dev, &out_dev, - &fl6, &n, &ttl); - if (err) - goto free_encap; - - /* used by mlx5e_detach_encap to lookup a neigh hash table - * entry in the neigh hash table when a user deletes a rule - */ - e->m_neigh.dev = n->dev; - e->m_neigh.family = n->ops->family; - memcpy(&e->m_neigh.dst_ip, n->primary_key, n->tbl->key_len); - e->out_dev = out_dev; - - /* It's importent to add the neigh to the hash table before checking - * 
the neigh validity state. So if we'll get a notification, in case the - * neigh changes it's validity state, we would find the relevant neigh - * in the hash. - */ - err = mlx5e_rep_encap_entry_attach(netdev_priv(out_dev), e); - if (err) - goto free_encap; - - read_lock_bh(&n->lock); - nud_state = n->nud_state; - ether_addr_copy(e->h_dest, n->ha); - read_unlock_bh(&n->lock); - - switch (e->tunnel_type) { - case MLX5_REFORMAT_TYPE_L2_TO_VXLAN: - gen_vxlan_header_ipv6(out_dev, encap_header, - ipv6_encap_size, e->h_dest, tos, ttl, - &fl6.daddr, - &fl6.saddr, tun_key->tp_dst, - tunnel_id_to_key32(tun_key->tun_id)); - break; - default: - err = -EOPNOTSUPP; - goto destroy_neigh_entry; - } - - e->encap_size = ipv6_encap_size; - e->encap_header = encap_header; - - if (!(nud_state & NUD_VALID)) { - neigh_event_send(n, NULL); - err = -EAGAIN; - goto out; - } - - err = mlx5_packet_reformat_alloc(priv->mdev, e->tunnel_type, - ipv6_encap_size, encap_header, - MLX5_FLOW_NAMESPACE_FDB, - &e->encap_id); - if (err) - goto destroy_neigh_entry; - - e->flags |= MLX5_ENCAP_ENTRY_VALID; - mlx5e_rep_queue_neigh_stats_work(netdev_priv(out_dev)); - neigh_release(n); - return err; - -destroy_neigh_entry: - mlx5e_rep_encap_entry_detach(netdev_priv(e->out_dev), e); -free_encap: - kfree(encap_header); -out: - if (n) - neigh_release(n); - return err; -} static int mlx5e_attach_encap(struct mlx5e_priv *priv, struct ip_tunnel_info *tun_info, struct net_device *mirred_dev, struct net_device **encap_dev, struct mlx5e_tc_flow *flow, - struct netlink_ext_ack *extack) + struct netlink_ext_ack *extack, + int out_index) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; unsigned short family = ip_tunnel_info_af(tun_info); struct mlx5_esw_flow_attr *attr = flow->esw_attr; struct ip_tunnel_key *key = &tun_info->key; struct mlx5e_encap_entry *e; - int tunnel_type, err = 0; uintptr_t hash_key; bool found = false; - - /* udp dst port must be set */ - if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst))) - goto vxlan_encap_offload_err; - - /* setting udp src port isn't supported */ - if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) { -vxlan_encap_offload_err: - NL_SET_ERR_MSG_MOD(extack, - "must set udp dst port and not set udp src port"); - netdev_warn(priv->netdev, - "must set udp dst port and not set udp src port\n"); - return -EOPNOTSUPP; - } - - if (mlx5_vxlan_lookup_port(priv->mdev->vxlan, be16_to_cpu(key->tp_dst)) && - MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) { - tunnel_type = MLX5_REFORMAT_TYPE_L2_TO_VXLAN; - } else { - NL_SET_ERR_MSG_MOD(extack, - "port isn't an offloaded vxlan udp dport"); - netdev_warn(priv->netdev, - "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst)); - return -EOPNOTSUPP; - } + int err = 0; hash_key = hash_encap_info(key); @@ -2763,13 +2415,16 @@ vxlan_encap_offload_err: return -ENOMEM; e->tun_info = *tun_info; - e->tunnel_type = tunnel_type; + err = mlx5e_tc_tun_init_encap_attr(mirred_dev, priv, e, extack); + if (err) + goto out_err; + INIT_LIST_HEAD(&e->flows); if (family == AF_INET) - err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e); + err = mlx5e_tc_tun_create_header_ipv4(priv, mirred_dev, e); else if (family == AF_INET6) - err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e); + err = mlx5e_tc_tun_create_header_ipv6(priv, mirred_dev, e); if (err && err != -EAGAIN) goto out_err; @@ -2777,12 +2432,15 @@ vxlan_encap_offload_err: hash_add_rcu(esw->offloads.encap_tbl, &e->encap_hlist, hash_key); attach_flow: - list_add(&flow->encap, &e->flows); + 
list_add(&flow->encaps[out_index].list, &e->flows); + flow->encaps[out_index].index = out_index; *encap_dev = e->out_dev; - if (e->flags & MLX5_ENCAP_ENTRY_VALID) - attr->encap_id = e->encap_id; - else + if (e->flags & MLX5_ENCAP_ENTRY_VALID) { + attr->dests[out_index].encap_id = e->encap_id; + attr->dests[out_index].flags |= MLX5_ESW_DEST_ENCAP_VALID; + } else { err = -EAGAIN; + } return err; @@ -2851,7 +2509,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct mlx5e_rep_priv *rpriv = priv->ppriv; struct ip_tunnel_info *info = NULL; const struct tc_action *a; - LIST_HEAD(actions); bool encap = false; u32 action = 0; int err, i; @@ -2876,7 +2533,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return err; action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; - attr->mirror_count = attr->out_count; + attr->split_count = attr->out_count; continue; } @@ -2894,6 +2551,13 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, struct net_device *out_dev; out_dev = tcf_mirred_dev(a); + if (!out_dev) { + /* out_dev is NULL when filters with + * non-existing mirred device are replayed to + * the driver. + */ + return -EINVAL; + } if (attr->out_count >= MLX5_MAX_FLOW_FWD_VPORTS) { NL_SET_ERR_MSG_MOD(extack, @@ -2903,23 +2567,47 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, return -EOPNOTSUPP; } + action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | + MLX5_FLOW_CONTEXT_ACTION_COUNT; if (switchdev_port_same_parent_id(priv->netdev, out_dev) || is_merged_eswitch_dev(priv, out_dev)) { - action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; + struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH); + struct net_device *uplink_upper = netdev_master_upper_dev_get(uplink_dev); + + if (uplink_upper && + netif_is_lag_master(uplink_upper) && + uplink_upper == out_dev) + out_dev = uplink_dev; + + if (!mlx5e_eswitch_rep(out_dev)) + return -EOPNOTSUPP; + out_priv = netdev_priv(out_dev); rpriv = out_priv->ppriv; - attr->out_rep[attr->out_count] = rpriv->rep; - attr->out_mdev[attr->out_count++] = out_priv->mdev; + attr->dests[attr->out_count].rep = rpriv->rep; + attr->dests[attr->out_count].mdev = out_priv->mdev; + attr->out_count++; } else if (encap) { - parse_attr->mirred_ifindex = out_dev->ifindex; - parse_attr->tun_info = *info; + parse_attr->mirred_ifindex[attr->out_count] = + out_dev->ifindex; + parse_attr->tun_info[attr->out_count] = *info; + encap = false; attr->parse_attr = parse_attr; - action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT | - MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | - MLX5_FLOW_CONTEXT_ACTION_COUNT; - /* attr->out_rep is resolved when we handle encap */ + attr->dests[attr->out_count].flags |= + MLX5_ESW_DEST_ENCAP; + attr->out_count++; + /* attr->dests[].rep is resolved when we + * handle encap + */ + } else if (parse_attr->filter_dev != priv->netdev) { + /* All mlx5 devices are called to configure + * high level device filters. 
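The -EINVAL branch introduced here pairs with this comment: TC replays every filter to every registered device, so a filter that simply belongs to another device must be refused quietly, while genuine same-switch failures still set an extack message. A toy model of that fan-out, with invented names throughout:

/* All names here are invented for illustration only. */
#include <stdio.h>

struct filter { int target_dev; };

/* Per-driver callback: quietly decline filters owned by other devices. */
static int driver_cb(int my_dev, const struct filter *f)
{
	if (f->target_dev != my_dev)
		return -22;   /* stand-in for -EINVAL: not ours, no loud error */
	/* ...validate and install the filter here... */
	return 0;
}

int main(void)
{
	struct filter f = { .target_dev = 2 };
	int installed = 0;

	/* the core replays the same filter to every registered driver */
	for (int dev = 1; dev <= 3; dev++)
		if (driver_cb(dev, &f) == 0)
			installed++;

	printf("installed by %d driver(s)\n", installed);  /* prints 1 */
	return 0;
}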
Therefore, the + * *attempt* to install a filter on invalid + * eswitch should not trigger an explicit error + */ + return -EINVAL; } else { NL_SET_ERR_MSG_MOD(extack, "devices are not on same switch HW, can't offload forwarding"); @@ -2936,7 +2624,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, encap = true; else return -EOPNOTSUPP; - attr->mirror_count = attr->out_count; continue; } @@ -2946,7 +2633,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, if (err) return err; - attr->mirror_count = attr->out_count; + attr->split_count = attr->out_count; continue; } @@ -2988,7 +2675,7 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv, struct tcf_exts *exts, attr->action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; } - if (attr->mirror_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { + if (attr->split_count > 0 && !mlx5_esw_has_fwd_fdb(priv->mdev)) { NL_SET_ERR_MSG_MOD(extack, "current firmware doesn't support split rule for port mirroring"); netdev_warn_once(priv->netdev, "current firmware doesn't support split rule for port mirroring\n"); @@ -3007,6 +2694,11 @@ static void get_flags(int flags, u16 *flow_flags) if (flags & MLX5E_TC_EGRESS) __flow_flags |= MLX5E_TC_FLOW_EGRESS; + if (flags & MLX5E_TC_ESW_OFFLOAD) + __flow_flags |= MLX5E_TC_FLOW_ESWITCH; + if (flags & MLX5E_TC_NIC_OFFLOAD) + __flow_flags |= MLX5E_TC_FLOW_NIC; + *flow_flags = __flow_flags; } @@ -3017,18 +2709,32 @@ static const struct rhashtable_params tc_ht_params = { .automatic_shrinking = true, }; -static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv) +static struct rhashtable *get_tc_ht(struct mlx5e_priv *priv, int flags) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_rep_priv *uplink_rpriv; - if (MLX5_VPORT_MANAGER(priv->mdev) && esw->mode == SRIOV_OFFLOADS) { + if (flags & MLX5E_TC_ESW_OFFLOAD) { uplink_rpriv = mlx5_eswitch_get_uplink_priv(esw, REP_ETH); - return &uplink_rpriv->tc_ht; - } else + return &uplink_rpriv->uplink_priv.tc_ht; + } else /* NIC offload */ return &priv->fs.tc.ht; } +static bool is_peer_flow_needed(struct mlx5e_tc_flow *flow) +{ + struct mlx5_esw_flow_attr *attr = flow->esw_attr; + bool is_rep_ingress = attr->in_rep->vport != FDB_UPLINK_VPORT && + flow->flags & MLX5E_TC_FLOW_INGRESS; + bool act_is_encap = !!(attr->action & + MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT); + bool esw_paired = mlx5_devcom_is_paired(attr->in_mdev->priv.devcom, + MLX5_DEVCOM_ESW_OFFLOADS); + + return esw_paired && mlx5_lag_is_sriov(attr->in_mdev) && + (is_rep_ingress || act_is_encap); +} + static int mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, struct tc_cls_flower_offload *f, u16 flow_flags, @@ -3050,10 +2756,6 @@ mlx5e_alloc_flow(struct mlx5e_priv *priv, int attr_size, flow->flags = flow_flags; flow->priv = priv; - err = parse_cls_flower(priv, flow, &parse_attr->spec, f); - if (err) - goto err_free; - *__flow = flow; *__parse_attr = parse_attr; @@ -3066,12 +2768,16 @@ err_free: } static int -mlx5e_add_fdb_flow(struct mlx5e_priv *priv, - struct tc_cls_flower_offload *f, - u16 flow_flags, - struct mlx5e_tc_flow **__flow) +__mlx5e_add_fdb_flow(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f, + u16 flow_flags, + struct net_device *filter_dev, + struct mlx5_eswitch_rep *in_rep, + struct mlx5_core_dev *in_mdev, + struct mlx5e_tc_flow **__flow) { struct netlink_ext_ack *extack = f->common.extack; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; struct mlx5e_tc_flow_parse_attr *parse_attr; struct 
mlx5e_tc_flow *flow; int attr_size, err; @@ -3082,6 +2788,12 @@ mlx5e_add_fdb_flow(struct mlx5e_priv *priv, &parse_attr, &flow); if (err) goto out; + parse_attr->filter_dev = filter_dev; + flow->esw_attr->parse_attr = parse_attr; + err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, + f, filter_dev); + if (err) + goto err_free; flow->esw_attr->chain = f->common.chain_index; flow->esw_attr->prio = TC_H_MAJ(f->common.prio) >> 16; @@ -3089,14 +2801,19 @@ if (err) goto err_free; + flow->esw_attr->in_rep = in_rep; + flow->esw_attr->in_mdev = in_mdev; + + if (MLX5_CAP_ESW(esw->dev, counter_eswitch_affinity) == + MLX5_COUNTER_SOURCE_ESWITCH) + flow->esw_attr->counter_dev = in_mdev; + else + flow->esw_attr->counter_dev = priv->mdev; + err = mlx5e_tc_add_fdb_flow(priv, parse_attr, flow, extack); if (err) goto err_free; - if (!(flow->esw_attr->action & - MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT)) - kvfree(parse_attr); - *__flow = flow; return 0; @@ -3108,10 +2825,92 @@ out: return err; } +static int mlx5e_tc_add_fdb_peer_flow(struct tc_cls_flower_offload *f, + struct mlx5e_tc_flow *flow) +{ + struct mlx5e_priv *priv = flow->priv, *peer_priv; + struct mlx5_eswitch *esw = priv->mdev->priv.eswitch, *peer_esw; + struct mlx5_devcom *devcom = priv->mdev->priv.devcom; + struct mlx5e_tc_flow_parse_attr *parse_attr; + struct mlx5e_rep_priv *peer_urpriv; + struct mlx5e_tc_flow *peer_flow; + struct mlx5_core_dev *in_mdev; + int err = 0; + + peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!peer_esw) + return -ENODEV; + + peer_urpriv = mlx5_eswitch_get_uplink_priv(peer_esw, REP_ETH); + peer_priv = netdev_priv(peer_urpriv->netdev); + + /* in_mdev holds the mdev the packet originated from. + * Packets redirected to the uplink use the same mdev as the + * original flow, and packets redirected from the uplink use + * the peer mdev.
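This comment is the crux of flow duplication on paired e-switches: the duplicate must account traffic on the device it will actually ingress, so uplink-ingress flows source from the peer mdev while VF-ingress flows keep the mdev the packet arrived on. A compact sketch of the selection, with an invented vport constant and stand-in types:

/* Stand-in types; UPLINK_VPORT is an invented value for illustration. */
struct mdev { int id; };

#define UPLINK_VPORT 0xffff

static struct mdev *peer_flow_in_mdev(int in_vport,
				      struct mdev *orig, struct mdev *peer)
{
	/* Uplink ingress: the duplicated flow sees the peer's traffic,
	 * so it sources from the peer mdev; VF ingress keeps the mdev
	 * the packet originally arrived on. */
	return in_vport == UPLINK_VPORT ? peer : orig;
}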
+ */ + if (flow->esw_attr->in_rep->vport == FDB_UPLINK_VPORT) + in_mdev = peer_priv->mdev; + else + in_mdev = priv->mdev; + + parse_attr = flow->esw_attr->parse_attr; + err = __mlx5e_add_fdb_flow(peer_priv, f, flow->flags, + parse_attr->filter_dev, + flow->esw_attr->in_rep, in_mdev, &peer_flow); + if (err) + goto out; + + flow->peer_flow = peer_flow; + flow->flags |= MLX5E_TC_FLOW_DUP; + mutex_lock(&esw->offloads.peer_mutex); + list_add_tail(&flow->peer, &esw->offloads.peer_flows); + mutex_unlock(&esw->offloads.peer_mutex); + +out: + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + return err; +} + +static int +mlx5e_add_fdb_flow(struct mlx5e_priv *priv, + struct tc_cls_flower_offload *f, + u16 flow_flags, + struct net_device *filter_dev, + struct mlx5e_tc_flow **__flow) +{ + struct mlx5e_rep_priv *rpriv = priv->ppriv; + struct mlx5_eswitch_rep *in_rep = rpriv->rep; + struct mlx5_core_dev *in_mdev = priv->mdev; + struct mlx5e_tc_flow *flow; + int err; + + err = __mlx5e_add_fdb_flow(priv, f, flow_flags, filter_dev, in_rep, + in_mdev, &flow); + if (err) + goto out; + + if (is_peer_flow_needed(flow)) { + err = mlx5e_tc_add_fdb_peer_flow(f, flow); + if (err) { + mlx5e_tc_del_fdb_flow(priv, flow); + goto out; + } + } + + *__flow = flow; + + return 0; + +out: + return err; +} + static int mlx5e_add_nic_flow(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, u16 flow_flags, + struct net_device *filter_dev, struct mlx5e_tc_flow **__flow) { struct netlink_ext_ack *extack = f->common.extack; @@ -3130,6 +2929,12 @@ mlx5e_add_nic_flow(struct mlx5e_priv *priv, if (err) goto out; + parse_attr->filter_dev = filter_dev; + err = parse_cls_flower(flow->priv, flow, &parse_attr->spec, + f, filter_dev); + if (err) + goto err_free; + err = parse_tc_nic_actions(priv, f->exts, parse_attr, flow, extack); if (err) goto err_free; @@ -3155,6 +2960,7 @@ static int mlx5e_tc_add_flow(struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags, + struct net_device *filter_dev, struct mlx5e_tc_flow **flow) { struct mlx5_eswitch *esw = priv->mdev->priv.eswitch; @@ -3167,18 +2973,20 @@ mlx5e_tc_add_flow(struct mlx5e_priv *priv, return -EOPNOTSUPP; if (esw && esw->mode == SRIOV_OFFLOADS) - err = mlx5e_add_fdb_flow(priv, f, flow_flags, flow); + err = mlx5e_add_fdb_flow(priv, f, flow_flags, + filter_dev, flow); else - err = mlx5e_add_nic_flow(priv, f, flow_flags, flow); + err = mlx5e_add_nic_flow(priv, f, flow_flags, + filter_dev, flow); return err; } -int mlx5e_configure_flower(struct mlx5e_priv *priv, +int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { struct netlink_ext_ack *extack = f->common.extack; - struct rhashtable *tc_ht = get_tc_ht(priv); + struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; int err = 0; @@ -3192,7 +3000,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, goto out; } - err = mlx5e_tc_add_flow(priv, f, flags, &flow); + err = mlx5e_tc_add_flow(priv, f, flags, dev, &flow); if (err) goto out; @@ -3220,10 +3028,10 @@ static bool same_flow_direction(struct mlx5e_tc_flow *flow, int flags) return false; } -int mlx5e_delete_flower(struct mlx5e_priv *priv, +int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { - struct rhashtable *tc_ht = get_tc_ht(priv); + struct rhashtable *tc_ht = get_tc_ht(priv, flags); struct mlx5e_tc_flow *flow; flow = rhashtable_lookup_fast(tc_ht, &f->cookie, tc_ht_params); @@ -3239,10 
+3047,12 @@ int mlx5e_delete_flower(struct mlx5e_priv *priv, return 0; } -int mlx5e_stats_flower(struct mlx5e_priv *priv, +int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags) { - struct rhashtable *tc_ht = get_tc_ht(priv); + struct mlx5_devcom *devcom = priv->mdev->priv.devcom; + struct rhashtable *tc_ht = get_tc_ht(priv, flags); + struct mlx5_eswitch *peer_esw; struct mlx5e_tc_flow *flow; struct mlx5_fc *counter; u64 bytes; @@ -3262,6 +3072,27 @@ int mlx5e_stats_flower(struct mlx5e_priv *priv, mlx5_fc_query_cached(counter, &bytes, &packets, &lastuse); + peer_esw = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + if (!peer_esw) + goto out; + + if ((flow->flags & MLX5E_TC_FLOW_DUP) && + (flow->peer_flow->flags & MLX5E_TC_FLOW_OFFLOADED)) { + u64 bytes2; + u64 packets2; + u64 lastuse2; + + counter = mlx5e_tc_get_counter(flow->peer_flow); + mlx5_fc_query_cached(counter, &bytes2, &packets2, &lastuse2); + + bytes += bytes2; + packets += packets2; + lastuse = max_t(u64, lastuse, lastuse2); + } + + mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS); + +out: tcf_exts_stats_update(f->exts, bytes, packets, lastuse); return 0; @@ -3350,7 +3181,7 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) if (tc->netdevice_nb.notifier_call) unregister_netdevice_notifier(&tc->netdevice_nb); - rhashtable_free_and_destroy(&tc->ht, _mlx5e_tc_del_flow, NULL); + rhashtable_destroy(&tc->ht); if (!IS_ERR_OR_NULL(tc->t)) { mlx5_destroy_flow_table(tc->t); @@ -3368,9 +3199,17 @@ void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht) rhashtable_free_and_destroy(tc_ht, _mlx5e_tc_del_flow, NULL); } -int mlx5e_tc_num_filters(struct mlx5e_priv *priv) +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { - struct rhashtable *tc_ht = get_tc_ht(priv); + struct rhashtable *tc_ht = get_tc_ht(priv, flags); return atomic_read(&tc_ht->nelems); } + +void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) +{ + struct mlx5e_tc_flow *flow, *tmp; + + list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows, peer) + __mlx5e_tc_del_fdb_peer_flow(flow); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h index 49436bf3b80a..d2d87f978c06 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.h @@ -42,7 +42,9 @@ enum { MLX5E_TC_INGRESS = BIT(0), MLX5E_TC_EGRESS = BIT(1), - MLX5E_TC_LAST_EXPORTED_BIT = 1, + MLX5E_TC_NIC_OFFLOAD = BIT(2), + MLX5E_TC_ESW_OFFLOAD = BIT(3), + MLX5E_TC_LAST_EXPORTED_BIT = 3, }; int mlx5e_tc_nic_init(struct mlx5e_priv *priv); @@ -51,12 +53,12 @@ void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv); int mlx5e_tc_esw_init(struct rhashtable *tc_ht); void mlx5e_tc_esw_cleanup(struct rhashtable *tc_ht); -int mlx5e_configure_flower(struct mlx5e_priv *priv, +int mlx5e_configure_flower(struct net_device *dev, struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags); -int mlx5e_delete_flower(struct mlx5e_priv *priv, +int mlx5e_delete_flower(struct net_device *dev, struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags); -int mlx5e_stats_flower(struct mlx5e_priv *priv, +int mlx5e_stats_flower(struct net_device *dev, struct mlx5e_priv *priv, struct tc_cls_flower_offload *f, int flags); struct mlx5e_encap_entry; @@ -68,12 +70,13 @@ void mlx5e_tc_encap_flows_del(struct mlx5e_priv *priv, struct mlx5e_neigh_hash_entry; void mlx5e_tc_update_neigh_used_value(struct 
mlx5e_neigh_hash_entry *nhe); -int mlx5e_tc_num_filters(struct mlx5e_priv *priv); +int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags); + #else /* CONFIG_MLX5_ESWITCH */ static inline int mlx5e_tc_nic_init(struct mlx5e_priv *priv) { return 0; } static inline void mlx5e_tc_nic_cleanup(struct mlx5e_priv *priv) {} -static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv) { return 0; } +static inline int mlx5e_tc_num_filters(struct mlx5e_priv *priv, int flags) { return 0; } #endif #endif /* __MLX5_EN_TC_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 6dacaeba2fbf..598ad7e4d5c9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -127,7 +127,7 @@ u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb, else #endif if (skb_vlan_tag_present(skb)) - up = skb->vlan_tci >> VLAN_PRIO_SHIFT; + up = skb_vlan_tag_get_prio(skb); /* channel_ix can be larger than num_channels since * dev->num_real_tx_queues = num_channels * num_tc @@ -459,9 +459,10 @@ static void mlx5e_dump_error_cqe(struct mlx5e_txqsq *sq, u32 ci = mlx5_cqwq_get_ci(&sq->cq.wq); netdev_err(sq->channel->netdev, - "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", - sq->cq.mcq.cqn, ci, sq->sqn, err_cqe->syndrome, - err_cqe->vendor_err_synd); + "Error cqe on cqn 0x%x, ci 0x%x, sqn 0x%x, opcode 0x%x, syndrome 0x%x, vendor syndrome 0x%x\n", + sq->cq.mcq.cqn, ci, sq->sqn, + get_cqe_opcode((struct mlx5_cqe64 *)err_cqe), + err_cqe->syndrome, err_cqe->vendor_err_synd); mlx5_dump_err_cqe(sq->cq.mdev, err_cqe); } @@ -507,7 +508,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) wqe_counter = be16_to_cpu(cqe->wqe_counter); - if (unlikely(cqe->op_own >> 4 == MLX5_CQE_REQ_ERR)) { + if (unlikely(get_cqe_opcode(cqe) == MLX5_CQE_REQ_ERR)) { if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { mlx5e_dump_error_cqe(sq, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index 85d517360157..b4af5e19f6ac 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -76,6 +76,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) struct mlx5e_channel *c = container_of(napi, struct mlx5e_channel, napi); struct mlx5e_ch_stats *ch_stats = c->stats; + struct mlx5e_rq *rq = &c->rq; bool busy = false; int work_done = 0; int i; @@ -85,17 +86,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) for (i = 0; i < c->num_tc; i++) busy |= mlx5e_poll_tx_cq(&c->sq[i].cq, budget); - busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq); + busy |= mlx5e_poll_xdpsq_cq(&c->xdpsq.cq, NULL); if (c->xdp) - busy |= mlx5e_poll_xdpsq_cq(&c->rq.xdpsq.cq); + busy |= mlx5e_poll_xdpsq_cq(&rq->xdpsq.cq, rq); if (likely(budget)) { /* budget=0 means: don't poll rx rings */ - work_done = mlx5e_poll_rx_cq(&c->rq.cq, budget); + work_done = mlx5e_poll_rx_cq(&rq->cq, budget); busy |= work_done == budget; } - busy |= c->rq.post_wqes(&c->rq); + busy |= c->rq.post_wqes(rq); if (busy) { if (likely(mlx5e_channel_no_affinity_change(c))) @@ -115,9 +116,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) mlx5e_cq_arm(&c->sq[i].cq); } - mlx5e_handle_rx_dim(&c->rq); + mlx5e_handle_rx_dim(rq); - mlx5e_cq_arm(&c->rq.cq); + mlx5e_cq_arm(&rq->cq); mlx5e_cq_arm(&c->icosq.cq); mlx5e_cq_arm(&c->xdpsq.cq); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index c1e1a16a9b07..ee04aab65a9f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -31,20 +31,22 @@ */ #include <linux/interrupt.h> +#include <linux/notifier.h> #include <linux/module.h> #include <linux/mlx5/driver.h> +#include <linux/mlx5/eq.h> #include <linux/mlx5/cmd.h> #ifdef CONFIG_RFS_ACCEL #include <linux/cpu_rmap.h> #endif #include "mlx5_core.h" +#include "lib/eq.h" #include "fpga/core.h" #include "eswitch.h" #include "lib/clock.h" #include "diag/fw_tracer.h" enum { - MLX5_EQE_SIZE = sizeof(struct mlx5_eqe), MLX5_EQE_OWNER_INIT_VAL = 0x1, }; @@ -55,14 +57,32 @@ enum { }; enum { - MLX5_NUM_SPARE_EQE = 0x80, - MLX5_NUM_ASYNC_EQE = 0x1000, - MLX5_NUM_CMD_EQE = 32, - MLX5_NUM_PF_DRAIN = 64, + MLX5_EQ_DOORBEL_OFFSET = 0x40, }; -enum { - MLX5_EQ_DOORBEL_OFFSET = 0x40, +struct mlx5_irq_info { + cpumask_var_t mask; + char name[MLX5_MAX_IRQ_NAME]; + void *context; /* dev_id provided to request_irq */ +}; + +struct mlx5_eq_table { + struct list_head comp_eqs_list; + struct mlx5_eq pages_eq; + struct mlx5_eq cmd_eq; + struct mlx5_eq async_eq; + + struct atomic_notifier_head nh[MLX5_EVENT_TYPE_MAX]; + + /* Since CQ DB is stored in async_eq */ + struct mlx5_nb cq_err_nb; + + struct mutex lock; /* sync async eqs creations */ + int num_comp_vectors; + struct mlx5_irq_info *irq_info; +#ifdef CONFIG_RFS_ACCEL + struct cpu_rmap *rmap; +#endif }; #define MLX5_ASYNC_EVENT_MASK ((1ull << MLX5_EVENT_TYPE_PATH_MIG) | \ @@ -78,17 +98,6 @@ enum { (1ull << MLX5_EVENT_TYPE_SRQ_LAST_WQE) | \ (1ull << MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)) -struct map_eq_in { - u64 mask; - u32 reserved; - u32 unmap_eqn; -}; - -struct cre_des_eq { - u8 reserved[15]; - u8 eqn; -}; - static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) { u32 out[MLX5_ST_SZ_DW(destroy_eq_out)] = {0}; @@ -99,213 +108,56 @@ static int mlx5_cmd_destroy_eq(struct mlx5_core_dev *dev, u8 eqn) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -static struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) -{ - return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); -} - -static struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) -{ - struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); - - return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? 
NULL : eqe; -} - -static const char *eqe_type_str(u8 type) -{ - switch (type) { - case MLX5_EVENT_TYPE_COMP: - return "MLX5_EVENT_TYPE_COMP"; - case MLX5_EVENT_TYPE_PATH_MIG: - return "MLX5_EVENT_TYPE_PATH_MIG"; - case MLX5_EVENT_TYPE_COMM_EST: - return "MLX5_EVENT_TYPE_COMM_EST"; - case MLX5_EVENT_TYPE_SQ_DRAINED: - return "MLX5_EVENT_TYPE_SQ_DRAINED"; - case MLX5_EVENT_TYPE_SRQ_LAST_WQE: - return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; - case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: - return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; - case MLX5_EVENT_TYPE_CQ_ERROR: - return "MLX5_EVENT_TYPE_CQ_ERROR"; - case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: - return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; - case MLX5_EVENT_TYPE_PATH_MIG_FAILED: - return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; - case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: - return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; - case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: - return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; - case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: - return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; - case MLX5_EVENT_TYPE_INTERNAL_ERROR: - return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; - case MLX5_EVENT_TYPE_PORT_CHANGE: - return "MLX5_EVENT_TYPE_PORT_CHANGE"; - case MLX5_EVENT_TYPE_GPIO_EVENT: - return "MLX5_EVENT_TYPE_GPIO_EVENT"; - case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: - return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; - case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: - return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT"; - case MLX5_EVENT_TYPE_REMOTE_CONFIG: - return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; - case MLX5_EVENT_TYPE_DB_BF_CONGESTION: - return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; - case MLX5_EVENT_TYPE_STALL_EVENT: - return "MLX5_EVENT_TYPE_STALL_EVENT"; - case MLX5_EVENT_TYPE_CMD: - return "MLX5_EVENT_TYPE_CMD"; - case MLX5_EVENT_TYPE_PAGE_REQUEST: - return "MLX5_EVENT_TYPE_PAGE_REQUEST"; - case MLX5_EVENT_TYPE_PAGE_FAULT: - return "MLX5_EVENT_TYPE_PAGE_FAULT"; - case MLX5_EVENT_TYPE_PPS_EVENT: - return "MLX5_EVENT_TYPE_PPS_EVENT"; - case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: - return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; - case MLX5_EVENT_TYPE_FPGA_ERROR: - return "MLX5_EVENT_TYPE_FPGA_ERROR"; - case MLX5_EVENT_TYPE_FPGA_QP_ERROR: - return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; - case MLX5_EVENT_TYPE_GENERAL_EVENT: - return "MLX5_EVENT_TYPE_GENERAL_EVENT"; - case MLX5_EVENT_TYPE_DEVICE_TRACER: - return "MLX5_EVENT_TYPE_DEVICE_TRACER"; - default: - return "Unrecognized event"; - } -} - -static enum mlx5_dev_event port_subtype_event(u8 subtype) -{ - switch (subtype) { - case MLX5_PORT_CHANGE_SUBTYPE_DOWN: - return MLX5_DEV_EVENT_PORT_DOWN; - case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: - return MLX5_DEV_EVENT_PORT_UP; - case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: - return MLX5_DEV_EVENT_PORT_INITIALIZED; - case MLX5_PORT_CHANGE_SUBTYPE_LID: - return MLX5_DEV_EVENT_LID_CHANGE; - case MLX5_PORT_CHANGE_SUBTYPE_PKEY: - return MLX5_DEV_EVENT_PKEY_CHANGE; - case MLX5_PORT_CHANGE_SUBTYPE_GUID: - return MLX5_DEV_EVENT_GUID_CHANGE; - case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: - return MLX5_DEV_EVENT_CLIENT_REREG; - } - return -1; -} - -static void eq_update_ci(struct mlx5_eq *eq, int arm) +/* caller must eventually call mlx5_cq_put on the returned cq */ +static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) { - __be32 __iomem *addr = eq->doorbell + (arm ? 
0 : 2); - u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); - - __raw_writel((__force u32)cpu_to_be32(val), addr); - /* We still want ordering, just not swabbing, so add a barrier */ - mb(); -} + struct mlx5_cq_table *table = &eq->cq_table; + struct mlx5_core_cq *cq = NULL; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING -static void eqe_pf_action(struct work_struct *work) -{ - struct mlx5_pagefault *pfault = container_of(work, - struct mlx5_pagefault, - work); - struct mlx5_eq *eq = pfault->eq; + spin_lock(&table->lock); + cq = radix_tree_lookup(&table->tree, cqn); + if (likely(cq)) + mlx5_cq_hold(cq); + spin_unlock(&table->lock); - mlx5_core_page_fault(eq->dev, pfault); - mempool_free(pfault, eq->pf_ctx.pool); + return cq; } -static void eq_pf_process(struct mlx5_eq *eq) +static irqreturn_t mlx5_eq_comp_int(int irq, void *eq_ptr) { - struct mlx5_core_dev *dev = eq->dev; - struct mlx5_eqe_page_fault *pf_eqe; - struct mlx5_pagefault *pfault; + struct mlx5_eq_comp *eq_comp = eq_ptr; + struct mlx5_eq *eq = eq_ptr; struct mlx5_eqe *eqe; int set_ci = 0; + u32 cqn = -1; while ((eqe = next_eqe_sw(eq))) { - pfault = mempool_alloc(eq->pf_ctx.pool, GFP_ATOMIC); - if (!pfault) { - schedule_work(&eq->pf_ctx.work); - break; - } - + struct mlx5_core_cq *cq; + /* Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ dma_rmb(); - pf_eqe = &eqe->data.page_fault; - pfault->event_subtype = eqe->sub_type; - pfault->bytes_committed = be32_to_cpu(pf_eqe->bytes_committed); - - mlx5_core_dbg(dev, - "PAGE_FAULT: subtype: 0x%02x, bytes_committed: 0x%06x\n", - eqe->sub_type, pfault->bytes_committed); - - switch (eqe->sub_type) { - case MLX5_PFAULT_SUBTYPE_RDMA: - /* RDMA based event */ - pfault->type = - be32_to_cpu(pf_eqe->rdma.pftype_token) >> 24; - pfault->token = - be32_to_cpu(pf_eqe->rdma.pftype_token) & - MLX5_24BIT_MASK; - pfault->rdma.r_key = - be32_to_cpu(pf_eqe->rdma.r_key); - pfault->rdma.packet_size = - be16_to_cpu(pf_eqe->rdma.packet_length); - pfault->rdma.rdma_op_len = - be32_to_cpu(pf_eqe->rdma.rdma_op_len); - pfault->rdma.rdma_va = - be64_to_cpu(pf_eqe->rdma.rdma_va); - mlx5_core_dbg(dev, - "PAGE_FAULT: type:0x%x, token: 0x%06x, r_key: 0x%08x\n", - pfault->type, pfault->token, - pfault->rdma.r_key); - mlx5_core_dbg(dev, - "PAGE_FAULT: rdma_op_len: 0x%08x, rdma_va: 0x%016llx\n", - pfault->rdma.rdma_op_len, - pfault->rdma.rdma_va); - break; - - case MLX5_PFAULT_SUBTYPE_WQE: - /* WQE based event */ - pfault->type = - (be32_to_cpu(pf_eqe->wqe.pftype_wq) >> 24) & 0x7; - pfault->token = - be32_to_cpu(pf_eqe->wqe.token); - pfault->wqe.wq_num = - be32_to_cpu(pf_eqe->wqe.pftype_wq) & - MLX5_24BIT_MASK; - pfault->wqe.wqe_index = - be16_to_cpu(pf_eqe->wqe.wqe_index); - pfault->wqe.packet_size = - be16_to_cpu(pf_eqe->wqe.packet_length); - mlx5_core_dbg(dev, - "PAGE_FAULT: type:0x%x, token: 0x%06x, wq_num: 0x%06x, wqe_index: 0x%04x\n", - pfault->type, pfault->token, - pfault->wqe.wq_num, - pfault->wqe.wqe_index); - break; - - default: - mlx5_core_warn(dev, - "Unsupported page fault event sub-type: 0x%02hhx\n", - eqe->sub_type); - /* Unsupported page faults should still be - * resolved by the page fault handler - */ + /* Assume (eqe->type) is always MLX5_EVENT_TYPE_COMP */ + cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; + + cq = mlx5_eq_cq_get(eq, cqn); + if (likely(cq)) { + ++cq->arm_sn; + cq->comp(cq); + mlx5_cq_put(cq); + } else { + mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); } - pfault->eq = eq; - INIT_WORK(&pfault->work, eqe_pf_action); - 
queue_work(eq->pf_ctx.wq, &pfault->work); - ++eq->cons_index; ++set_ci; + /* The HCA will think the queue has overflowed if we + * don't tell it we've been processing events. We + * create our EQs with MLX5_NUM_SPARE_EQE extra + * entries, so we must update our consumer index at + * least that often. + */ if (unlikely(set_ci >= MLX5_NUM_SPARE_EQE)) { eq_update_ci(eq, 0); set_ci = 0; @@ -313,165 +165,41 @@ static void eq_pf_process(struct mlx5_eq *eq) } eq_update_ci(eq, 1); -} -static irqreturn_t mlx5_eq_pf_int(int irq, void *eq_ptr) -{ - struct mlx5_eq *eq = eq_ptr; - unsigned long flags; - - if (spin_trylock_irqsave(&eq->pf_ctx.lock, flags)) { - eq_pf_process(eq); - spin_unlock_irqrestore(&eq->pf_ctx.lock, flags); - } else { - schedule_work(&eq->pf_ctx.work); - } + if (cqn != -1) + tasklet_schedule(&eq_comp->tasklet_ctx.task); return IRQ_HANDLED; } -/* mempool_refill() was proposed but unfortunately wasn't accepted - * http://lkml.iu.edu/hypermail/linux/kernel/1512.1/05073.html - * Chip workaround. +/* Some architectures don't latch interrupts when they are disabled, so using + * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to + * avoid losing them. It is not recommended to use it, unless this is the last + * resort. */ -static void mempool_refill(mempool_t *pool) -{ - while (pool->curr_nr < pool->min_nr) - mempool_free(mempool_alloc(pool, GFP_KERNEL), pool); -} - -static void eq_pf_action(struct work_struct *work) -{ - struct mlx5_eq *eq = container_of(work, struct mlx5_eq, pf_ctx.work); - - mempool_refill(eq->pf_ctx.pool); - - spin_lock_irq(&eq->pf_ctx.lock); - eq_pf_process(eq); - spin_unlock_irq(&eq->pf_ctx.lock); -} - -static int init_pf_ctx(struct mlx5_eq_pagefault *pf_ctx, const char *name) -{ - spin_lock_init(&pf_ctx->lock); - INIT_WORK(&pf_ctx->work, eq_pf_action); - - pf_ctx->wq = alloc_ordered_workqueue(name, - WQ_MEM_RECLAIM); - if (!pf_ctx->wq) - return -ENOMEM; - - pf_ctx->pool = mempool_create_kmalloc_pool - (MLX5_NUM_PF_DRAIN, sizeof(struct mlx5_pagefault)); - if (!pf_ctx->pool) - goto err_wq; - - return 0; -err_wq: - destroy_workqueue(pf_ctx->wq); - return -ENOMEM; -} - -int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, - u32 wq_num, u8 type, int error) -{ - u32 out[MLX5_ST_SZ_DW(page_fault_resume_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(page_fault_resume_in)] = {0}; - - MLX5_SET(page_fault_resume_in, in, opcode, - MLX5_CMD_OP_PAGE_FAULT_RESUME); - MLX5_SET(page_fault_resume_in, in, error, !!error); - MLX5_SET(page_fault_resume_in, in, page_fault_type, type); - MLX5_SET(page_fault_resume_in, in, wq_number, wq_num); - MLX5_SET(page_fault_resume_in, in, token, token); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} -EXPORT_SYMBOL_GPL(mlx5_core_page_fault_resume); -#endif - -static void general_event_handler(struct mlx5_core_dev *dev, - struct mlx5_eqe *eqe) -{ - switch (eqe->sub_type) { - case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: - if (dev->event) - dev->event(dev, MLX5_DEV_EVENT_DELAY_DROP_TIMEOUT, 0); - break; - default: - mlx5_core_dbg(dev, "General event with unrecognized subtype: sub_type %d\n", - eqe->sub_type); - } -} - -static void mlx5_temp_warning_event(struct mlx5_core_dev *dev, - struct mlx5_eqe *eqe) -{ - u64 value_lsb; - u64 value_msb; - - value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); - value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); - - mlx5_core_warn(dev, - "High temperature on sensors with bit set %llx %llx", - value_msb, value_lsb); -} - -/* caller 
must eventually call mlx5_cq_put on the returned cq */ -static struct mlx5_core_cq *mlx5_eq_cq_get(struct mlx5_eq *eq, u32 cqn) -{ - struct mlx5_cq_table *table = &eq->cq_table; - struct mlx5_core_cq *cq = NULL; - - spin_lock(&table->lock); - cq = radix_tree_lookup(&table->tree, cqn); - if (likely(cq)) - mlx5_cq_hold(cq); - spin_unlock(&table->lock); - - return cq; -} - -static void mlx5_eq_cq_completion(struct mlx5_eq *eq, u32 cqn) -{ - struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); - - if (unlikely(!cq)) { - mlx5_core_warn(eq->dev, "Completion event for bogus CQ 0x%x\n", cqn); - return; - } - - ++cq->arm_sn; - - cq->comp(cq); - - mlx5_cq_put(cq); -} - -static void mlx5_eq_cq_event(struct mlx5_eq *eq, u32 cqn, int event_type) +u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq) { - struct mlx5_core_cq *cq = mlx5_eq_cq_get(eq, cqn); - - if (unlikely(!cq)) { - mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); - return; - } + u32 count_eqe; - cq->event(cq, event_type); + disable_irq(eq->core.irqn); + count_eqe = eq->core.cons_index; + mlx5_eq_comp_int(eq->core.irqn, eq); + count_eqe = eq->core.cons_index - count_eqe; + enable_irq(eq->core.irqn); - mlx5_cq_put(cq); + return count_eqe; } -static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) +static irqreturn_t mlx5_eq_async_int(int irq, void *eq_ptr) { struct mlx5_eq *eq = eq_ptr; - struct mlx5_core_dev *dev = eq->dev; + struct mlx5_eq_table *eqt; + struct mlx5_core_dev *dev; struct mlx5_eqe *eqe; int set_ci = 0; - u32 cqn = -1; - u32 rsn; - u8 port; + + dev = eq->dev; + eqt = dev->priv.eq_table; while ((eqe = next_eqe_sw(eq))) { /* @@ -480,116 +208,12 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) */ dma_rmb(); - mlx5_core_dbg(eq->dev, "eqn %d, eqe type %s\n", - eq->eqn, eqe_type_str(eqe->type)); - switch (eqe->type) { - case MLX5_EVENT_TYPE_COMP: - cqn = be32_to_cpu(eqe->data.comp.cqn) & 0xffffff; - mlx5_eq_cq_completion(eq, cqn); - break; - case MLX5_EVENT_TYPE_DCT_DRAINED: - rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; - rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN); - mlx5_rsc_event(dev, rsn, eqe->type); - break; - case MLX5_EVENT_TYPE_PATH_MIG: - case MLX5_EVENT_TYPE_COMM_EST: - case MLX5_EVENT_TYPE_SQ_DRAINED: - case MLX5_EVENT_TYPE_SRQ_LAST_WQE: - case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: - case MLX5_EVENT_TYPE_PATH_MIG_FAILED: - case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: - case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: - rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; - rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN); - mlx5_core_dbg(dev, "event %s(%d) arrived on resource 0x%x\n", - eqe_type_str(eqe->type), eqe->type, rsn); - mlx5_rsc_event(dev, rsn, eqe->type); - break; - - case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: - case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: - rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; - mlx5_core_dbg(dev, "SRQ event %s(%d): srqn 0x%x\n", - eqe_type_str(eqe->type), eqe->type, rsn); - mlx5_srq_event(dev, rsn, eqe->type); - break; - - case MLX5_EVENT_TYPE_CMD: - mlx5_cmd_comp_handler(dev, be32_to_cpu(eqe->data.cmd.vector), false); - break; + if (likely(eqe->type < MLX5_EVENT_TYPE_MAX)) + atomic_notifier_call_chain(&eqt->nh[eqe->type], eqe->type, eqe); + else + mlx5_core_warn_once(dev, "notifier_call_chain is not setup for eqe: %d\n", eqe->type); - case MLX5_EVENT_TYPE_PORT_CHANGE: - port = (eqe->data.port.port >> 4) & 0xf; - switch (eqe->sub_type) { - case MLX5_PORT_CHANGE_SUBTYPE_DOWN: - case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: - case MLX5_PORT_CHANGE_SUBTYPE_LID: - case 
MLX5_PORT_CHANGE_SUBTYPE_PKEY: - case MLX5_PORT_CHANGE_SUBTYPE_GUID: - case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: - case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: - if (dev->event) - dev->event(dev, port_subtype_event(eqe->sub_type), - (unsigned long)port); - break; - default: - mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n", - port, eqe->sub_type); - } - break; - case MLX5_EVENT_TYPE_CQ_ERROR: - cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; - mlx5_core_warn(dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", - cqn, eqe->data.cq_err.syndrome); - mlx5_eq_cq_event(eq, cqn, eqe->type); - break; - - case MLX5_EVENT_TYPE_PAGE_REQUEST: - { - u16 func_id = be16_to_cpu(eqe->data.req_pages.func_id); - s32 npages = be32_to_cpu(eqe->data.req_pages.num_pages); - - mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", - func_id, npages); - mlx5_core_req_pages_handler(dev, func_id, npages); - } - break; - - case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: - mlx5_eswitch_vport_event(dev->priv.eswitch, eqe); - break; - - case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: - mlx5_port_module_event(dev, eqe); - break; - - case MLX5_EVENT_TYPE_PPS_EVENT: - mlx5_pps_event(dev, eqe); - break; - - case MLX5_EVENT_TYPE_FPGA_ERROR: - case MLX5_EVENT_TYPE_FPGA_QP_ERROR: - mlx5_fpga_event(dev, eqe->type, &eqe->data.raw); - break; - - case MLX5_EVENT_TYPE_TEMP_WARN_EVENT: - mlx5_temp_warning_event(dev, eqe); - break; - - case MLX5_EVENT_TYPE_GENERAL_EVENT: - general_event_handler(dev, eqe); - break; - - case MLX5_EVENT_TYPE_DEVICE_TRACER: - mlx5_fw_tracer_event(dev, eqe); - break; - - default: - mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n", - eqe->type, eq->eqn); - break; - } + atomic_notifier_call_chain(&eqt->nh[MLX5_EVENT_TYPE_NOTIFY_ANY], eqe->type, eqe); ++eq->cons_index; ++set_ci; @@ -608,30 +232,9 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr) eq_update_ci(eq, 1); - if (cqn != -1) - tasklet_schedule(&eq->tasklet_ctx.task); - return IRQ_HANDLED; } -/* Some architectures don't latch interrupts when they are disabled, so using - * mlx5_eq_poll_irq_disabled could end up losing interrupts while trying to - * avoid losing them. It is not recommended to use it, unless this is the last - * resort. 
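The comment being relocated here (its new home is beside mlx5_eq_comp_int) describes a genuinely last-resort idiom: mask the EQ's interrupt line, invoke the handler by hand, and report progress as the consumer-index delta. A standalone sketch with stubbed helpers in place of the kernel's disable_irq()/enable_irq():

/* Stubbed IRQ helpers; the kernel uses disable_irq()/enable_irq(). */
struct eq { unsigned int irqn; unsigned int cons_index; };

static void irq_mask(unsigned int irq)   { (void)irq; /* stub */ }
static void irq_unmask(unsigned int irq) { (void)irq; /* stub */ }

/* Pretend handler: drains pending EQEs, advancing the consumer index. */
static void eq_drain(struct eq *eq)
{
	eq->cons_index += 3;   /* say three events were pending */
}

/* Poll with the vector masked; return how many EQEs were consumed. */
static unsigned int eq_poll_irq_disabled(struct eq *eq)
{
	unsigned int before;

	irq_mask(eq->irqn);          /* no concurrent hard-IRQ entry */
	before = eq->cons_index;
	eq_drain(eq);                /* run the handler by hand */
	irq_unmask(eq->irqn);

	return eq->cons_index - before;
}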
- */ -u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq) -{ - u32 count_eqe; - - disable_irq(eq->irqn); - count_eqe = eq->cons_index; - mlx5_eq_int(eq->irqn, eq); - count_eqe = eq->cons_index - count_eqe; - enable_irq(eq->irqn); - - return count_eqe; -} - static void init_eq_buf(struct mlx5_eq *eq) { struct mlx5_eqe *eqe; @@ -643,39 +246,35 @@ static void init_eq_buf(struct mlx5_eq *eq) } } -int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, - enum mlx5_eq_type type) +static int +create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, const char *name, + struct mlx5_eq_param *param) { + struct mlx5_eq_table *eq_table = dev->priv.eq_table; struct mlx5_cq_table *cq_table = &eq->cq_table; u32 out[MLX5_ST_SZ_DW(create_eq_out)] = {0}; struct mlx5_priv *priv = &dev->priv; - irq_handler_t handler; + u8 vecidx = param->index; __be64 *pas; void *eqc; int inlen; u32 *in; int err; + if (eq_table->irq_info[vecidx].context) + return -EEXIST; + /* Init CQ table */ memset(cq_table, 0, sizeof(*cq_table)); spin_lock_init(&cq_table->lock); INIT_RADIX_TREE(&cq_table->tree, GFP_ATOMIC); - eq->type = type; - eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); + eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE); eq->cons_index = 0; err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); if (err) return err; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (type == MLX5_EQ_TYPE_PF) - handler = mlx5_eq_pf_int; - else -#endif - handler = mlx5_eq_int; - init_eq_buf(eq); inlen = MLX5_ST_SZ_BYTES(create_eq_in) + @@ -691,7 +290,7 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, mlx5_fill_page_array(&eq->buf, pas); MLX5_SET(create_eq_in, in, opcode, MLX5_CMD_OP_CREATE_EQ); - MLX5_SET64(create_eq_in, in, event_bitmask, mask); + MLX5_SET64(create_eq_in, in, event_bitmask, param->mask); eqc = MLX5_ADDR_OF(create_eq_in, in, eq_context_entry); MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); @@ -704,15 +303,17 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, if (err) goto err_in; - snprintf(priv->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", + snprintf(eq_table->irq_info[vecidx].name, MLX5_MAX_IRQ_NAME, "%s@pci:%s", name, pci_name(dev->pdev)); + eq_table->irq_info[vecidx].context = param->context; + eq->vecidx = vecidx; eq->eqn = MLX5_GET(create_eq_out, out, eq_number); eq->irqn = pci_irq_vector(dev->pdev, vecidx); eq->dev = dev; eq->doorbell = priv->uar->map + MLX5_EQ_DOORBEL_OFFSET; - err = request_irq(eq->irqn, handler, 0, - priv->irq_info[vecidx].name, eq); + err = request_irq(eq->irqn, param->handler, 0, + eq_table->irq_info[vecidx].name, param->context); if (err) goto err_eq; @@ -720,21 +321,6 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, if (err) goto err_irq; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (type == MLX5_EQ_TYPE_PF) { - err = init_pf_ctx(&eq->pf_ctx, name); - if (err) - goto err_irq; - } else -#endif - { - INIT_LIST_HEAD(&eq->tasklet_ctx.list); - INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); - spin_lock_init(&eq->tasklet_ctx.lock); - tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, - (unsigned long)&eq->tasklet_ctx); - } - /* EQs are created in ARMED state */ eq_update_ci(eq, 1); @@ -756,27 +342,25 @@ err_buf: return err; } -int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +static int destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) { + 
struct mlx5_eq_table *eq_table = dev->priv.eq_table; + struct mlx5_irq_info *irq_info; int err; + irq_info = &eq_table->irq_info[eq->vecidx]; + mlx5_debug_eq_remove(dev, eq); - free_irq(eq->irqn, eq); + + free_irq(eq->irqn, irq_info->context); + irq_info->context = NULL; + err = mlx5_cmd_destroy_eq(dev, eq->eqn); if (err) mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", eq->eqn); synchronize_irq(eq->irqn); - if (eq->type == MLX5_EQ_TYPE_COMP) { - tasklet_disable(&eq->tasklet_ctx.task); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - } else if (eq->type == MLX5_EQ_TYPE_PF) { - cancel_work_sync(&eq->pf_ctx.work); - destroy_workqueue(eq->pf_ctx.wq); - mempool_destroy(eq->pf_ctx.pool); -#endif - } mlx5_buf_free(dev, &eq->buf); return err; @@ -816,28 +400,106 @@ int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq) return 0; } -int mlx5_eq_init(struct mlx5_core_dev *dev) +int mlx5_eq_table_init(struct mlx5_core_dev *dev) { - int err; + struct mlx5_eq_table *eq_table; + int i, err; - spin_lock_init(&dev->priv.eq_table.lock); + eq_table = kvzalloc(sizeof(*eq_table), GFP_KERNEL); + if (!eq_table) + return -ENOMEM; + + dev->priv.eq_table = eq_table; err = mlx5_eq_debugfs_init(dev); + if (err) + goto kvfree_eq_table; + + mutex_init(&eq_table->lock); + for (i = 0; i < MLX5_EVENT_TYPE_MAX; i++) + ATOMIC_INIT_NOTIFIER_HEAD(&eq_table->nh[i]); + return 0; + +kvfree_eq_table: + kvfree(eq_table); + dev->priv.eq_table = NULL; return err; } -void mlx5_eq_cleanup(struct mlx5_core_dev *dev) +void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev) { mlx5_eq_debugfs_cleanup(dev); + kvfree(dev->priv.eq_table); } -int mlx5_start_eqs(struct mlx5_core_dev *dev) +/* Async EQs */ + +static int create_async_eq(struct mlx5_core_dev *dev, const char *name, + struct mlx5_eq *eq, struct mlx5_eq_param *param) { - struct mlx5_eq_table *table = &dev->priv.eq_table; - u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; + struct mlx5_eq_table *eq_table = dev->priv.eq_table; + int err; + + mutex_lock(&eq_table->lock); + if (param->index >= MLX5_EQ_MAX_ASYNC_EQS) { + err = -ENOSPC; + goto unlock; + } + + err = create_map_eq(dev, eq, name, param); +unlock: + mutex_unlock(&eq_table->lock); + return err; +} + +static int destroy_async_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + struct mlx5_eq_table *eq_table = dev->priv.eq_table; int err; + mutex_lock(&eq_table->lock); + err = destroy_unmap_eq(dev, eq); + mutex_unlock(&eq_table->lock); + return err; +} + +static int cq_err_event_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct mlx5_eq_table *eqt; + struct mlx5_core_cq *cq; + struct mlx5_eqe *eqe; + struct mlx5_eq *eq; + u32 cqn; + + /* type == MLX5_EVENT_TYPE_CQ_ERROR */ + + eqt = mlx5_nb_cof(nb, struct mlx5_eq_table, cq_err_nb); + eq = &eqt->async_eq; + eqe = data; + + cqn = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff; + mlx5_core_warn(eq->dev, "CQ error on CQN 0x%x, syndrome 0x%x\n", + cqn, eqe->data.cq_err.syndrome); + + cq = mlx5_eq_cq_get(eq, cqn); + if (unlikely(!cq)) { + mlx5_core_warn(eq->dev, "Async event for bogus CQ 0x%x\n", cqn); + return NOTIFY_OK; + } + + cq->event(cq, type); + + mlx5_cq_put(cq); + + return NOTIFY_OK; +} + +static u64 gather_async_events_mask(struct mlx5_core_dev *dev) +{ + u64 async_event_mask = MLX5_ASYNC_EVENT_MASK; + if (MLX5_VPORT_MANAGER(dev)) async_event_mask |= (1ull << MLX5_EVENT_TYPE_NIC_VPORT_CHANGE); @@ -865,127 +527,521 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev) if (MLX5_CAP_MCAM_REG(dev, tracer_registers)) 
async_event_mask |= (1ull << MLX5_EVENT_TYPE_DEVICE_TRACER); - err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD, - MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD, - "mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC); + if (MLX5_CAP_GEN(dev, max_num_of_monitor_counters)) + async_event_mask |= (1ull << MLX5_EVENT_TYPE_MONITOR_COUNTER); + + return async_event_mask; +} + +static int create_async_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_param param = {}; + int err; + + MLX5_NB_INIT(&table->cq_err_nb, cq_err_event_notifier, CQ_ERROR); + mlx5_eq_notifier_register(dev, &table->cq_err_nb); + + param = (struct mlx5_eq_param) { + .index = MLX5_EQ_CMD_IDX, + .mask = 1ull << MLX5_EVENT_TYPE_CMD, + .nent = MLX5_NUM_CMD_EQE, + .context = &table->cmd_eq, + .handler = mlx5_eq_async_int, + }; + err = create_async_eq(dev, "mlx5_cmd_eq", &table->cmd_eq, ¶m); if (err) { mlx5_core_warn(dev, "failed to create cmd EQ %d\n", err); - return err; + goto err0; } mlx5_cmd_use_events(dev); - err = mlx5_create_map_eq(dev, &table->async_eq, MLX5_EQ_VEC_ASYNC, - MLX5_NUM_ASYNC_EQE, async_event_mask, - "mlx5_async_eq", MLX5_EQ_TYPE_ASYNC); + param = (struct mlx5_eq_param) { + .index = MLX5_EQ_ASYNC_IDX, + .mask = gather_async_events_mask(dev), + .nent = MLX5_NUM_ASYNC_EQE, + .context = &table->async_eq, + .handler = mlx5_eq_async_int, + }; + err = create_async_eq(dev, "mlx5_async_eq", &table->async_eq, ¶m); if (err) { mlx5_core_warn(dev, "failed to create async EQ %d\n", err); goto err1; } - err = mlx5_create_map_eq(dev, &table->pages_eq, - MLX5_EQ_VEC_PAGES, - /* TODO: sriov max_vf + */ 1, - 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, "mlx5_pages_eq", - MLX5_EQ_TYPE_ASYNC); + param = (struct mlx5_eq_param) { + .index = MLX5_EQ_PAGEREQ_IDX, + .mask = 1 << MLX5_EVENT_TYPE_PAGE_REQUEST, + .nent = /* TODO: sriov max_vf + */ 1, + .context = &table->pages_eq, + .handler = mlx5_eq_async_int, + }; + err = create_async_eq(dev, "mlx5_pages_eq", &table->pages_eq, ¶m); if (err) { mlx5_core_warn(dev, "failed to create pages EQ %d\n", err); goto err2; } -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (MLX5_CAP_GEN(dev, pg)) { - err = mlx5_create_map_eq(dev, &table->pfault_eq, - MLX5_EQ_VEC_PFAULT, - MLX5_NUM_ASYNC_EQE, - 1 << MLX5_EVENT_TYPE_PAGE_FAULT, - "mlx5_page_fault_eq", - MLX5_EQ_TYPE_PF); - if (err) { - mlx5_core_warn(dev, "failed to create page fault EQ %d\n", - err); - goto err3; - } - } - - return err; -err3: - mlx5_destroy_unmap_eq(dev, &table->pages_eq); -#else return err; -#endif err2: - mlx5_destroy_unmap_eq(dev, &table->async_eq); + destroy_async_eq(dev, &table->async_eq); err1: mlx5_cmd_use_polling(dev); - mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + destroy_async_eq(dev, &table->cmd_eq); +err0: + mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); return err; } -void mlx5_stop_eqs(struct mlx5_core_dev *dev) +static void destroy_async_eqs(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; + struct mlx5_eq_table *table = dev->priv.eq_table; int err; -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (MLX5_CAP_GEN(dev, pg)) { - err = mlx5_destroy_unmap_eq(dev, &table->pfault_eq); - if (err) - mlx5_core_err(dev, "failed to destroy page fault eq, err(%d)\n", - err); - } -#endif - - err = mlx5_destroy_unmap_eq(dev, &table->pages_eq); + err = destroy_async_eq(dev, &table->pages_eq); if (err) mlx5_core_err(dev, "failed to destroy pages eq, err(%d)\n", err); - err = mlx5_destroy_unmap_eq(dev, &table->async_eq); + err = destroy_async_eq(dev, 
&table->async_eq); if (err) mlx5_core_err(dev, "failed to destroy async eq, err(%d)\n", err); + mlx5_cmd_use_polling(dev); - err = mlx5_destroy_unmap_eq(dev, &table->cmd_eq); + err = destroy_async_eq(dev, &table->cmd_eq); if (err) mlx5_core_err(dev, "failed to destroy command eq, err(%d)\n", err); + + mlx5_eq_notifier_unregister(dev, &table->cq_err_nb); } -int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - u32 *out, int outlen) +struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev) { - u32 in[MLX5_ST_SZ_DW(query_eq_in)] = {0}; + return &dev->priv.eq_table->async_eq; +} - MLX5_SET(query_eq_in, in, opcode, MLX5_CMD_OP_QUERY_EQ); - MLX5_SET(query_eq_in, in, eq_number, eq->eqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); +void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev) +{ + synchronize_irq(dev->priv.eq_table->async_eq.irqn); +} + +void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev) +{ + synchronize_irq(dev->priv.eq_table->cmd_eq.irqn); +} + +/* Generic EQ API for mlx5_core consumers + * Needed For RDMA ODP EQ for now + */ +struct mlx5_eq * +mlx5_eq_create_generic(struct mlx5_core_dev *dev, const char *name, + struct mlx5_eq_param *param) +{ + struct mlx5_eq *eq = kvzalloc(sizeof(*eq), GFP_KERNEL); + int err; + + if (!eq) + return ERR_PTR(-ENOMEM); + + err = create_async_eq(dev, name, eq, param); + if (err) { + kvfree(eq); + eq = ERR_PTR(err); + } + + return eq; +} +EXPORT_SYMBOL(mlx5_eq_create_generic); + +int mlx5_eq_destroy_generic(struct mlx5_core_dev *dev, struct mlx5_eq *eq) +{ + int err; + + if (IS_ERR(eq)) + return -EINVAL; + + err = destroy_async_eq(dev, eq); + if (err) + goto out; + + kvfree(eq); +out: + return err; +} +EXPORT_SYMBOL(mlx5_eq_destroy_generic); + +struct mlx5_eqe *mlx5_eq_get_eqe(struct mlx5_eq *eq, u32 cc) +{ + u32 ci = eq->cons_index + cc; + struct mlx5_eqe *eqe; + + eqe = get_eqe(eq, ci & (eq->nent - 1)); + eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe; + /* Make sure we read EQ entry contents after we've + * checked the ownership bit. + */ + if (eqe) + dma_rmb(); + + return eqe; +} +EXPORT_SYMBOL(mlx5_eq_get_eqe); + +void mlx5_eq_update_ci(struct mlx5_eq *eq, u32 cc, bool arm) +{ + __be32 __iomem *addr = eq->doorbell + (arm ? 
0 : 2); + u32 val; + + eq->cons_index += cc; + val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + + __raw_writel((__force u32)cpu_to_be32(val), addr); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} +EXPORT_SYMBOL(mlx5_eq_update_ci); + +/* Completion EQs */ + +static int set_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + struct mlx5_priv *priv = &mdev->priv; + int vecidx = MLX5_EQ_VEC_COMP_BASE + i; + int irq = pci_irq_vector(mdev->pdev, vecidx); + struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx]; + + if (!zalloc_cpumask_var(&irq_info->mask, GFP_KERNEL)) { + mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); + return -ENOMEM; + } + + cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), + irq_info->mask); + + if (IS_ENABLED(CONFIG_SMP) && + irq_set_affinity_hint(irq, irq_info->mask)) + mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); + + return 0; +} + +static void clear_comp_irq_affinity_hint(struct mlx5_core_dev *mdev, int i) +{ + int vecidx = MLX5_EQ_VEC_COMP_BASE + i; + struct mlx5_priv *priv = &mdev->priv; + int irq = pci_irq_vector(mdev->pdev, vecidx); + struct mlx5_irq_info *irq_info = &priv->eq_table->irq_info[vecidx]; + + irq_set_affinity_hint(irq, NULL); + free_cpumask_var(irq_info->mask); +} + +static int set_comp_irq_affinity_hints(struct mlx5_core_dev *mdev) +{ + int err; + int i; + + for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) { + err = set_comp_irq_affinity_hint(mdev, i); + if (err) + goto err_out; + } + + return 0; + +err_out: + for (i--; i >= 0; i--) + clear_comp_irq_affinity_hint(mdev, i); + + return err; +} + +static void clear_comp_irqs_affinity_hints(struct mlx5_core_dev *mdev) +{ + int i; + + for (i = 0; i < mdev->priv.eq_table->num_comp_vectors; i++) + clear_comp_irq_affinity_hint(mdev, i); +} + +static void destroy_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_comp *eq, *n; + + clear_comp_irqs_affinity_hints(dev); + +#ifdef CONFIG_RFS_ACCEL + if (table->rmap) { + free_irq_cpu_rmap(table->rmap); + table->rmap = NULL; + } +#endif + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + list_del(&eq->list); + if (destroy_unmap_eq(dev, &eq->core)) + mlx5_core_warn(dev, "failed to destroy comp EQ 0x%x\n", + eq->core.eqn); + tasklet_disable(&eq->tasklet_ctx.task); + kfree(eq); + } +} + +static int create_comp_eqs(struct mlx5_core_dev *dev) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + char name[MLX5_MAX_IRQ_NAME]; + struct mlx5_eq_comp *eq; + int ncomp_vec; + int nent; + int err; + int i; + + INIT_LIST_HEAD(&table->comp_eqs_list); + ncomp_vec = table->num_comp_vectors; + nent = MLX5_COMP_EQ_SIZE; +#ifdef CONFIG_RFS_ACCEL + table->rmap = alloc_irq_cpu_rmap(ncomp_vec); + if (!table->rmap) + return -ENOMEM; +#endif + for (i = 0; i < ncomp_vec; i++) { + int vecidx = i + MLX5_EQ_VEC_COMP_BASE; + struct mlx5_eq_param param = {}; + + eq = kzalloc(sizeof(*eq), GFP_KERNEL); + if (!eq) { + err = -ENOMEM; + goto clean; + } + + INIT_LIST_HEAD(&eq->tasklet_ctx.list); + INIT_LIST_HEAD(&eq->tasklet_ctx.process_list); + spin_lock_init(&eq->tasklet_ctx.lock); + tasklet_init(&eq->tasklet_ctx.task, mlx5_cq_tasklet_cb, + (unsigned long)&eq->tasklet_ctx); + +#ifdef CONFIG_RFS_ACCEL + irq_cpu_rmap_add(table->rmap, pci_irq_vector(dev->pdev, vecidx)); +#endif + snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); + param = (struct mlx5_eq_param) { + .index = vecidx, + .mask = 0, + .nent = nent, + .context 
= &eq->core, + .handler = mlx5_eq_comp_int + }; + err = create_map_eq(dev, &eq->core, name, ¶m); + if (err) { + kfree(eq); + goto clean; + } + mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->core.eqn); + /* add tail, to keep the list ordered, for mlx5_vector2eqn to work */ + list_add_tail(&eq->list, &table->comp_eqs_list); + } + + err = set_comp_irq_affinity_hints(dev); + if (err) { + mlx5_core_err(dev, "Failed to alloc affinity hint cpumask\n"); + goto clean; + } + + return 0; + +clean: + destroy_comp_eqs(dev); + return err; +} + +int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, + unsigned int *irqn) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_comp *eq, *n; + int err = -ENOENT; + int i = 0; + + list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { + if (i++ == vector) { + *eqn = eq->core.eqn; + *irqn = eq->core.irqn; + err = 0; + break; + } + } + + return err; +} +EXPORT_SYMBOL(mlx5_vector2eqn); + +unsigned int mlx5_comp_vectors_count(struct mlx5_core_dev *dev) +{ + return dev->priv.eq_table->num_comp_vectors; +} +EXPORT_SYMBOL(mlx5_comp_vectors_count); + +struct cpumask * +mlx5_comp_irq_get_affinity_mask(struct mlx5_core_dev *dev, int vector) +{ + /* TODO: consider irq_get_affinity_mask(irq) */ + return dev->priv.eq_table->irq_info[vector + MLX5_EQ_VEC_COMP_BASE].mask; +} +EXPORT_SYMBOL(mlx5_comp_irq_get_affinity_mask); + +struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev) +{ +#ifdef CONFIG_RFS_ACCEL + return dev->priv.eq_table->rmap; +#else + return NULL; +#endif +} + +struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn) +{ + struct mlx5_eq_table *table = dev->priv.eq_table; + struct mlx5_eq_comp *eq; + + list_for_each_entry(eq, &table->comp_eqs_list, list) { + if (eq->core.eqn == eqn) + return eq; + } + + return ERR_PTR(-ENOENT); } /* This function should only be called after mlx5_cmd_force_teardown_hca */ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) { - struct mlx5_eq_table *table = &dev->priv.eq_table; - struct mlx5_eq *eq; + struct mlx5_eq_table *table = dev->priv.eq_table; + int i, max_eqs; + + clear_comp_irqs_affinity_hints(dev); #ifdef CONFIG_RFS_ACCEL - if (dev->rmap) { - free_irq_cpu_rmap(dev->rmap); - dev->rmap = NULL; + if (table->rmap) { + free_irq_cpu_rmap(table->rmap); + table->rmap = NULL; } #endif - list_for_each_entry(eq, &table->comp_eqs_list, list) - free_irq(eq->irqn, eq); - - free_irq(table->pages_eq.irqn, &table->pages_eq); - free_irq(table->async_eq.irqn, &table->async_eq); - free_irq(table->cmd_eq.irqn, &table->cmd_eq); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - if (MLX5_CAP_GEN(dev, pg)) - free_irq(table->pfault_eq.irqn, &table->pfault_eq); -#endif + + mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ + max_eqs = table->num_comp_vectors + MLX5_EQ_VEC_COMP_BASE; + for (i = max_eqs - 1; i >= 0; i--) { + if (!table->irq_info[i].context) + continue; + free_irq(pci_irq_vector(dev->pdev, i), table->irq_info[i].context); + table->irq_info[i].context = NULL; + } + mutex_unlock(&table->lock); + pci_free_irq_vectors(dev->pdev); +} + +static int alloc_irq_vectors(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + struct mlx5_eq_table *table = priv->eq_table; + int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? 
+ MLX5_CAP_GEN(dev, max_num_eqs) : + 1 << MLX5_CAP_GEN(dev, log_max_eq); + int nvec; + int err; + + nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + + MLX5_EQ_VEC_COMP_BASE; + nvec = min_t(int, nvec, num_eqs); + if (nvec <= MLX5_EQ_VEC_COMP_BASE) + return -ENOMEM; + + table->irq_info = kcalloc(nvec, sizeof(*table->irq_info), GFP_KERNEL); + if (!table->irq_info) + return -ENOMEM; + + nvec = pci_alloc_irq_vectors(dev->pdev, MLX5_EQ_VEC_COMP_BASE + 1, + nvec, PCI_IRQ_MSIX); + if (nvec < 0) { + err = nvec; + goto err_free_irq_info; + } + + table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; + + return 0; + +err_free_irq_info: + kfree(table->irq_info); + return err; +} + +static void free_irq_vectors(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + pci_free_irq_vectors(dev->pdev); + kfree(priv->eq_table->irq_info); +} + +int mlx5_eq_table_create(struct mlx5_core_dev *dev) +{ + int err; + + err = alloc_irq_vectors(dev); + if (err) { + mlx5_core_err(dev, "alloc irq vectors failed\n"); + return err; + } + + err = create_async_eqs(dev); + if (err) { + mlx5_core_err(dev, "Failed to create async EQs\n"); + goto err_async_eqs; + } + + err = create_comp_eqs(dev); + if (err) { + mlx5_core_err(dev, "Failed to create completion EQs\n"); + goto err_comp_eqs; + } + + return 0; +err_comp_eqs: + destroy_async_eqs(dev); +err_async_eqs: + free_irq_vectors(dev); + return err; +} + +void mlx5_eq_table_destroy(struct mlx5_core_dev *dev) +{ + destroy_comp_eqs(dev); + destroy_async_eqs(dev); + free_irq_vectors(dev); +} + +int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb) +{ + struct mlx5_eq_table *eqt = dev->priv.eq_table; + + if (nb->event_type >= MLX5_EVENT_TYPE_MAX) + return -EINVAL; + + return atomic_notifier_chain_register(&eqt->nh[nb->event_type], &nb->nb); +} + +int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb) +{ + struct mlx5_eq_table *eqt = dev->priv.eq_table; + + if (nb->event_type >= MLX5_EVENT_TYPE_MAX) + return -EINVAL; + + return atomic_notifier_chain_unregister(&eqt->nh[nb->event_type], &nb->nb); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index d004957328f9..a44ea7b85614 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -36,6 +36,7 @@ #include <linux/mlx5/vport.h> #include <linux/mlx5/fs.h> #include "mlx5_core.h" +#include "lib/eq.h" #include "eswitch.h" #include "fs_core.h" @@ -1567,7 +1568,6 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) /* Mark this vport as disabled to discard new events */ vport->enabled = false; - synchronize_irq(pci_irq_vector(esw->dev->pdev, MLX5_EQ_VEC_ASYNC)); /* Wait for current already scheduled events to complete */ flush_workqueue(esw->work_queue); /* Disable events from this vport */ @@ -1593,10 +1593,25 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num) mutex_unlock(&esw->state_lock); } +static int eswitch_vport_event(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct mlx5_eswitch *esw = mlx5_nb_cof(nb, struct mlx5_eswitch, nb); + struct mlx5_eqe *eqe = data; + struct mlx5_vport *vport; + u16 vport_num; + + vport_num = be16_to_cpu(eqe->data.vport_change.vport_num); + vport = &esw->vports[vport_num]; + if (vport->enabled) + queue_work(esw->work_queue, &vport->vport_change_handler); + + return NOTIFY_OK; +} + /* Public E-Switch API */ #define ESW_ALLOWED(esw) ((esw) 
&& MLX5_ESWITCH_MANAGER((esw)->dev)) - int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { int err; @@ -1615,13 +1630,16 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) esw_warn(esw->dev, "E-Switch egress ACL is not supported by FW\n"); esw_info(esw->dev, "E-Switch enable SRIOV: nvfs(%d) mode (%d)\n", nvfs, mode); + esw->mode = mode; + mlx5_lag_update(esw->dev); + if (mode == SRIOV_LEGACY) { err = esw_create_legacy_fdb_table(esw); } else { + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - err = esw_offloads_init(esw, nvfs + 1); } @@ -1640,6 +1658,11 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) for (i = 0; i <= nvfs; i++) esw_enable_vport(esw, i, enabled_events); + if (mode == SRIOV_LEGACY) { + MLX5_NB_INIT(&esw->nb, eswitch_vport_event, NIC_VPORT_CHANGE); + mlx5_eq_notifier_register(esw->dev, &esw->nb); + } + esw_info(esw->dev, "SRIOV enabled: active vports(%d)\n", esw->enabled_vports); return 0; @@ -1647,8 +1670,10 @@ int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) abort: esw->mode = SRIOV_NONE; - if (mode == SRIOV_OFFLOADS) + if (mode == SRIOV_OFFLOADS) { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); + } return err; } @@ -1669,6 +1694,9 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) mc_promisc = &esw->mc_promisc; nvports = esw->enabled_vports; + if (esw->mode == SRIOV_LEGACY) + mlx5_eq_notifier_unregister(esw->dev, &esw->nb); + for (i = 0; i < esw->total_vports; i++) esw_disable_vport(esw, i); @@ -1685,8 +1713,12 @@ void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) old_mode = esw->mode; esw->mode = SRIOV_NONE; - if (old_mode == SRIOV_OFFLOADS) + mlx5_lag_update(esw->dev); + + if (old_mode == SRIOV_OFFLOADS) { mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_ETH); + } } int mlx5_eswitch_init(struct mlx5_core_dev *dev) @@ -1777,23 +1809,6 @@ void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) kfree(esw); } -void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) -{ - struct mlx5_eqe_vport_change *vc_eqe = &eqe->data.vport_change; - u16 vport_num = be16_to_cpu(vc_eqe->vport_num); - struct mlx5_vport *vport; - - if (!esw) { - pr_warn("MLX5 E-Switch: vport %d got an event while eswitch is not initialized\n", - vport_num); - return; - } - - vport = &esw->vports[vport_num]; - if (vport->enabled) - queue_work(esw->work_queue, &vport->vport_change_handler); -} - /* Vport Administration */ #define LEGAL_VPORT(esw, vport) (vport >= 0 && vport < esw->total_vports) @@ -2219,3 +2234,14 @@ u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw) return ESW_ALLOWED(esw) ?
esw->mode : SRIOV_NONE; } EXPORT_SYMBOL_GPL(mlx5_eswitch_mode); + +bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) +{ + if ((dev0->priv.eswitch->mode == SRIOV_NONE && + dev1->priv.eswitch->mode == SRIOV_NONE) || + (dev0->priv.eswitch->mode == SRIOV_OFFLOADS && + dev1->priv.eswitch->mode == SRIOV_OFFLOADS)) + return true; + + return false; +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index aaafc9f17115..9c89eea9b2c3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -143,6 +143,8 @@ struct mlx5_eswitch_fdb { struct offloads_fdb { struct mlx5_flow_table *slow_fdb; struct mlx5_flow_group *send_to_vport_grp; + struct mlx5_flow_group *peer_miss_grp; + struct mlx5_flow_handle **peer_miss_rules; struct mlx5_flow_group *miss_grp; struct mlx5_flow_handle *miss_rule_uni; struct mlx5_flow_handle *miss_rule_multi; @@ -165,6 +167,8 @@ struct mlx5_esw_offload { struct mlx5_flow_table *ft_offloads; struct mlx5_flow_group *vport_rx_group; struct mlx5_eswitch_rep *vport_reps; + struct list_head peer_flows; + struct mutex peer_mutex; DECLARE_HASHTABLE(encap_tbl, 8); DECLARE_HASHTABLE(mod_hdr_tbl, 8); u8 inline_mode; @@ -181,6 +185,7 @@ struct esw_mc_addr { /* SRIOV only */ struct mlx5_eswitch { struct mlx5_core_dev *dev; + struct mlx5_nb nb; struct mlx5_eswitch_fdb fdb_table; struct hlist_head mc_table[MLX5_L2_ADDR_HASH_SIZE]; struct workqueue_struct *work_queue; @@ -211,7 +216,6 @@ int esw_offloads_init_reps(struct mlx5_eswitch *esw); /* E-Switch API */ int mlx5_eswitch_init(struct mlx5_core_dev *dev); void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw); -void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe); int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode); void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw); int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw, @@ -281,13 +285,17 @@ enum mlx5_flow_match_level { /* current maximum for flow based vport multicasting */ #define MLX5_MAX_FLOW_FWD_VPORTS 2 +enum { + MLX5_ESW_DEST_ENCAP = BIT(0), + MLX5_ESW_DEST_ENCAP_VALID = BIT(1), +}; + struct mlx5_esw_flow_attr { struct mlx5_eswitch_rep *in_rep; - struct mlx5_eswitch_rep *out_rep[MLX5_MAX_FLOW_FWD_VPORTS]; - struct mlx5_core_dev *out_mdev[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5_core_dev *in_mdev; + struct mlx5_core_dev *counter_dev; - int mirror_count; + int split_count; int out_count; int action; @@ -296,7 +304,12 @@ struct mlx5_esw_flow_attr { u8 vlan_prio[MLX5_FS_VLAN_DEPTH]; u8 total_vlan; bool vlan_handled; - u32 encap_id; + struct { + u32 flags; + struct mlx5_eswitch_rep *rep; + struct mlx5_core_dev *mdev; + u32 encap_id; + } dests[MLX5_MAX_FLOW_FWD_VPORTS]; u32 mod_hdr_id; u8 match_level; struct mlx5_fc *counter; @@ -338,6 +351,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev MLX5_CAP_ESW_FLOWTABLE_FDB(dev, push_vlan_2); } +bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, + struct mlx5_core_dev *dev1); + #define MLX5_DEBUG_ESWITCH_MASK BIT(3) #define esw_info(dev, format, ...) 
\ @@ -352,9 +368,9 @@ static inline bool mlx5_eswitch_vlan_actions_supported(struct mlx5_core_dev *dev /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } static inline void mlx5_eswitch_cleanup(struct mlx5_eswitch *esw) {} -static inline void mlx5_eswitch_vport_event(struct mlx5_eswitch *esw, struct mlx5_eqe *eqe) {} static inline int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode) { return 0; } static inline void mlx5_eswitch_disable_sriov(struct mlx5_eswitch *esw) {} +static inline bool mlx5_esw_lag_prereq(struct mlx5_core_dev *dev0, struct mlx5_core_dev *dev1) { return true; } #define FDB_MAX_CHAIN 1 #define FDB_SLOW_PATH_CHAIN (FDB_MAX_CHAIN + 1) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 9eac137790f5..53065b6ae593 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -39,6 +39,7 @@ #include "eswitch.h" #include "en.h" #include "fs_core.h" +#include "lib/devcom.h" enum { FDB_FAST_PATH = 0, @@ -81,7 +82,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, { struct mlx5_flow_destination dest[MLX5_MAX_FLOW_FWD_VPORTS + 1] = {}; struct mlx5_flow_act flow_act = { .flags = FLOW_ACT_NO_APPEND, }; - bool mirror = !!(attr->mirror_count); + bool split = !!(attr->split_count); struct mlx5_flow_handle *rule; struct mlx5_flow_table *fdb; int j, i = 0; @@ -120,13 +121,21 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, dest[i].ft = ft; i++; } else { - for (j = attr->mirror_count; j < attr->out_count; j++) { + for (j = attr->split_count; j < attr->out_count; j++) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - dest[i].vport.num = attr->out_rep[j]->vport; + dest[i].vport.num = attr->dests[j].rep->vport; dest[i].vport.vhca_id = - MLX5_CAP_GEN(attr->out_mdev[j], vhca_id); - dest[i].vport.vhca_id_valid = - !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + MLX5_CAP_GEN(attr->dests[j].mdev, vhca_id); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + dest[i].vport.flags |= + MLX5_FLOW_DEST_VPORT_VHCA_ID; + if (attr->dests[j].flags & MLX5_ESW_DEST_ENCAP) { + flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + flow_act.reformat_id = attr->dests[j].encap_id; + dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; + dest[i].vport.reformat_id = + attr->dests[j].encap_id; + } i++; } } @@ -163,10 +172,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) flow_act.modify_id = attr->mod_hdr_id; - if (flow_act.action & MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT) - flow_act.reformat_id = attr->encap_id; - - fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!mirror); + fdb = esw_get_prio_table(esw, attr->chain, attr->prio, !!split); if (IS_ERR(fdb)) { rule = ERR_CAST(fdb); goto err_esw_get; @@ -181,7 +187,7 @@ mlx5_eswitch_add_offloaded_rule(struct mlx5_eswitch *esw, return rule; err_add_rule: - esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror); + esw_put_prio_table(esw, attr->chain, attr->prio, !!split); err_esw_get: if (attr->dest_chain) esw_put_prio_table(esw, attr->dest_chain, 1, 0); @@ -215,12 +221,17 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; - for (i = 0; i < attr->mirror_count; i++) { + for (i = 0; i < attr->split_count; i++) { dest[i].type = MLX5_FLOW_DESTINATION_TYPE_VPORT; - 
dest[i].vport.num = attr->out_rep[i]->vport; + dest[i].vport.num = attr->dests[i].rep->vport; dest[i].vport.vhca_id = - MLX5_CAP_GEN(attr->out_mdev[i], vhca_id); - dest[i].vport.vhca_id_valid = !!MLX5_CAP_ESW(esw->dev, merged_eswitch); + MLX5_CAP_GEN(attr->dests[i].mdev, vhca_id); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; + if (attr->dests[i].flags & MLX5_ESW_DEST_ENCAP) { + dest[i].vport.flags |= MLX5_FLOW_DEST_VPORT_REFORMAT_ID; + dest[i].vport.reformat_id = attr->dests[i].encap_id; + } } dest[i].type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE; dest[i].ft = fwd_fdb, @@ -268,7 +279,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, struct mlx5_esw_flow_attr *attr, bool fwd_rule) { - bool mirror = (attr->mirror_count > 0); + bool split = (attr->split_count > 0); mlx5_del_flow_rules(rule); esw->offloads.num_flows--; @@ -277,7 +288,7 @@ __mlx5_eswitch_del_rule(struct mlx5_eswitch *esw, esw_put_prio_table(esw, attr->chain, attr->prio, 1); esw_put_prio_table(esw, attr->chain, attr->prio, 0); } else { - esw_put_prio_table(esw, attr->chain, attr->prio, !!mirror); + esw_put_prio_table(esw, attr->chain, attr->prio, !!split); if (attr->dest_chain) esw_put_prio_table(esw, attr->dest_chain, 1, 0); } @@ -325,7 +336,7 @@ esw_vlan_action_get_vport(struct mlx5_esw_flow_attr *attr, bool push, bool pop) struct mlx5_eswitch_rep *in_rep, *out_rep, *vport = NULL; in_rep = attr->in_rep; - out_rep = attr->out_rep[0]; + out_rep = attr->dests[0].rep; if (push) vport = in_rep; @@ -346,7 +357,7 @@ static int esw_add_vlan_action_check(struct mlx5_esw_flow_attr *attr, goto out_notsupp; in_rep = attr->in_rep; - out_rep = attr->out_rep[0]; + out_rep = attr->dests[0].rep; if (push && in_rep->vport == FDB_UPLINK_VPORT) goto out_notsupp; @@ -398,7 +409,7 @@ int mlx5_eswitch_add_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) { + if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) { vport->vlan_refcount++; attr->vlan_handled = true; } @@ -458,7 +469,7 @@ int mlx5_eswitch_del_vlan_action(struct mlx5_eswitch *esw, if (!push && !pop && fwd) { /* tracks VF --> wire rules without vlan push action */ - if (attr->out_rep[0]->vport == FDB_UPLINK_VPORT) + if (attr->dests[0].rep->vport == FDB_UPLINK_VPORT) vport->vlan_refcount--; return 0; @@ -531,6 +542,98 @@ void mlx5_eswitch_del_send_to_vport_rule(struct mlx5_flow_handle *rule) mlx5_del_flow_rules(rule); } +static void peer_miss_rules_setup(struct mlx5_core_dev *peer_dev, + struct mlx5_flow_spec *spec, + struct mlx5_flow_destination *dest) +{ + void *misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + + MLX5_SET(fte_match_set_misc, misc, source_eswitch_owner_vhca_id, + MLX5_CAP_GEN(peer_dev, vhca_id)); + + spec->match_criteria_enable = MLX5_MATCH_MISC_PARAMETERS; + + misc = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, + misc_parameters); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, source_port); + MLX5_SET_TO_ONES(fte_match_set_misc, misc, + source_eswitch_owner_vhca_id); + + dest->type = MLX5_FLOW_DESTINATION_TYPE_VPORT; + dest->vport.num = 0; + dest->vport.vhca_id = MLX5_CAP_GEN(peer_dev, vhca_id); + dest->vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; +} + +static int esw_add_fdb_peer_miss_rules(struct mlx5_eswitch *esw, + struct mlx5_core_dev *peer_dev) +{ + struct mlx5_flow_destination dest = {}; + struct mlx5_flow_act flow_act = {0}; + struct mlx5_flow_handle 
**flows; + struct mlx5_flow_handle *flow; + struct mlx5_flow_spec *spec; + /* total vports is the same for both e-switches */ + int nvports = esw->total_vports; + void *misc; + int err, i; + + spec = kvzalloc(sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + peer_miss_rules_setup(peer_dev, spec, &dest); + + flows = kvzalloc(nvports * sizeof(*flows), GFP_KERNEL); + if (!flows) { + err = -ENOMEM; + goto alloc_flows_err; + } + + flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; + misc = MLX5_ADDR_OF(fte_match_param, spec->match_value, + misc_parameters); + + for (i = 1; i < nvports; i++) { + MLX5_SET(fte_match_set_misc, misc, source_port, i); + flow = mlx5_add_flow_rules(esw->fdb_table.offloads.slow_fdb, + spec, &flow_act, &dest, 1); + if (IS_ERR(flow)) { + err = PTR_ERR(flow); + esw_warn(esw->dev, "FDB: Failed to add peer miss flow rule err %d\n", err); + goto add_flow_err; + } + flows[i] = flow; + } + + esw->fdb_table.offloads.peer_miss_rules = flows; + + kvfree(spec); + return 0; + +add_flow_err: + for (i--; i > 0; i--) + mlx5_del_flow_rules(flows[i]); + kvfree(flows); +alloc_flows_err: + kvfree(spec); + return err; +} + +static void esw_del_fdb_peer_miss_rules(struct mlx5_eswitch *esw) +{ + struct mlx5_flow_handle **flows; + int i; + + flows = esw->fdb_table.offloads.peer_miss_rules; + + for (i = 1; i < esw->total_vports; i++) + mlx5_del_flow_rules(flows[i]); + + kvfree(flows); +} + static int esw_add_fdb_miss_rule(struct mlx5_eswitch *esw) { struct mlx5_flow_act flow_act = {0}; @@ -801,7 +904,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) esw->fdb_table.offloads.fdb_left[i] = ESW_POOLS[i] <= fdb_max ? ESW_SIZE / ESW_POOLS[i] : 0; - table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2; + table_size = nvports * MAX_SQ_NVPORTS + MAX_PF_SQ + 2 + + esw->total_vports; /* create the slow path fdb with encap set, so further table instances * can be created at run time while VFs are probed if the FW allows that. 
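A note on the pattern this series uses throughout: the hardcoded EQE dispatch switch removed at the top of eq.c is replaced by per-event-type atomic notifier chains. A consumer embeds a struct mlx5_nb (a notifier_block plus the EQE type it cares about), initializes it with MLX5_NB_INIT(), and attaches it with mlx5_eq_notifier_register(); the callback then recovers its container via mlx5_nb_cof(), exactly as eswitch_vport_event() does above. The following is a minimal sketch of such a consumer, not part of the patch; "my_feature" and its fields are hypothetical names, and the helpers are assumed to come from lib/eq.h as introduced in this series:

#include <linux/notifier.h>
#include <linux/mlx5/driver.h>
#include "mlx5_core.h"	/* mlx5_core_dbg() */
#include "lib/eq.h"	/* struct mlx5_nb, MLX5_NB_INIT, mlx5_nb_cof */

struct my_feature {
	struct mlx5_core_dev *dev;
	struct mlx5_nb nb;	/* notifier_block + the EQE type it handles */
};

/* Runs from the EQ interrupt handler (atomic context); must not sleep.
 * 'data' points at the raw EQE of the registered event type.
 */
static int my_feature_event(struct notifier_block *nb,
			    unsigned long type, void *data)
{
	struct my_feature *feat = mlx5_nb_cof(nb, struct my_feature, nb);
	struct mlx5_eqe *eqe = data;

	mlx5_core_dbg(feat->dev, "vport change, sub_type %d\n",
		      eqe->sub_type);
	return NOTIFY_OK;
}

static int my_feature_start(struct my_feature *feat)
{
	/* Bind the callback to one EQE type, then hook it into the
	 * per-type atomic notifier chain fed by the async EQ.
	 */
	MLX5_NB_INIT(&feat->nb, my_feature_event, NIC_VPORT_CHANGE);
	return mlx5_eq_notifier_register(feat->dev, &feat->nb);
}

static void my_feature_stop(struct my_feature *feat)
{
	mlx5_eq_notifier_unregister(feat->dev, &feat->nb);
}

Because the chain runs in IRQ context, real consumers (the eswitch conversion above, for instance) only queue deferred work from the callback and return NOTIFY_OK.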
@@ -856,6 +960,34 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) } esw->fdb_table.offloads.send_to_vport_grp = g; + /* create peer esw miss group */ + memset(flow_group_in, 0, inlen); + MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, + MLX5_MATCH_MISC_PARAMETERS); + + match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, + match_criteria); + + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_port); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_eswitch_owner_vhca_id); + + MLX5_SET(create_flow_group_in, flow_group_in, + source_eswitch_owner_vhca_id_valid, 1); + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + ix + esw->total_vports - 1); + ix += esw->total_vports; + + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); + goto peer_miss_err; + } + esw->fdb_table.offloads.peer_miss_grp = g; + /* create miss group */ memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, @@ -888,6 +1020,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports) miss_rule_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); miss_err: + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); +peer_miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: esw_destroy_offloads_fast_fdb_tables(esw); @@ -907,6 +1041,7 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_destroy_flow_table(esw->fdb_table.offloads.slow_fdb); @@ -1163,6 +1298,105 @@ err_reps: return err; } +#define ESW_OFFLOADS_DEVCOM_PAIR (0) +#define ESW_OFFLOADS_DEVCOM_UNPAIR (1) + +static int mlx5_esw_offloads_pair(struct mlx5_eswitch *esw, + struct mlx5_eswitch *peer_esw) +{ + int err; + + err = esw_add_fdb_peer_miss_rules(esw, peer_esw->dev); + if (err) + return err; + + return 0; +} + +void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw); + +static void mlx5_esw_offloads_unpair(struct mlx5_eswitch *esw) +{ + mlx5e_tc_clean_fdb_peer_flows(esw); + esw_del_fdb_peer_miss_rules(esw); +} + +static int mlx5_esw_offloads_devcom_event(int event, + void *my_data, + void *event_data) +{ + struct mlx5_eswitch *esw = my_data; + struct mlx5_eswitch *peer_esw = event_data; + struct mlx5_devcom *devcom = esw->dev->priv.devcom; + int err; + + switch (event) { + case ESW_OFFLOADS_DEVCOM_PAIR: + err = mlx5_esw_offloads_pair(esw, peer_esw); + if (err) + goto err_out; + + err = mlx5_esw_offloads_pair(peer_esw, esw); + if (err) + goto err_pair; + + mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, true); + break; + + case ESW_OFFLOADS_DEVCOM_UNPAIR: + if (!mlx5_devcom_is_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS)) + break; + + mlx5_devcom_set_paired(devcom, MLX5_DEVCOM_ESW_OFFLOADS, false); + mlx5_esw_offloads_unpair(peer_esw); + mlx5_esw_offloads_unpair(esw); + break; + } + + return 0; + +err_pair: + mlx5_esw_offloads_unpair(esw); + +err_out: + mlx5_core_err(esw->dev, "esw 
offloads devcom event failure, event %u err %d", + event, err); + return err; +} + +static void esw_offloads_devcom_init(struct mlx5_eswitch *esw) +{ + struct mlx5_devcom *devcom = esw->dev->priv.devcom; + + INIT_LIST_HEAD(&esw->offloads.peer_flows); + mutex_init(&esw->offloads.peer_mutex); + + if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) + return; + + mlx5_devcom_register_component(devcom, + MLX5_DEVCOM_ESW_OFFLOADS, + mlx5_esw_offloads_devcom_event, + esw); + + mlx5_devcom_send_event(devcom, + MLX5_DEVCOM_ESW_OFFLOADS, + ESW_OFFLOADS_DEVCOM_PAIR, esw); +} + +static void esw_offloads_devcom_cleanup(struct mlx5_eswitch *esw) +{ + struct mlx5_devcom *devcom = esw->dev->priv.devcom; + + if (!MLX5_CAP_ESW(esw->dev, merged_eswitch)) + return; + + mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, + ESW_OFFLOADS_DEVCOM_UNPAIR, esw); + + mlx5_devcom_unregister_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS); +} + int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) { int err; @@ -1185,6 +1419,7 @@ int esw_offloads_init(struct mlx5_eswitch *esw, int nvports) if (err) goto err_reps; + esw_offloads_devcom_init(esw); return 0; err_reps: @@ -1215,14 +1450,12 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw, } } - /* enable back PF RoCE */ - mlx5_reload_interface(esw->dev, MLX5_INTERFACE_PROTOCOL_IB); - return err; } void esw_offloads_cleanup(struct mlx5_eswitch *esw, int nvports) { + esw_offloads_devcom_cleanup(esw); esw_offloads_unload_reps(esw, nvports); esw_destroy_vport_rx_group(esw); esw_destroy_offloads_table(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/events.c b/drivers/net/ethernet/mellanox/mlx5/core/events.c new file mode 100644 index 000000000000..fbc42b7252a9 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/events.c @@ -0,0 +1,325 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +// Copyright (c) 2018 Mellanox Technologies + +#include <linux/mlx5/driver.h> + +#include "mlx5_core.h" +#include "lib/eq.h" +#include "lib/mlx5.h" + +struct mlx5_event_nb { + struct mlx5_nb nb; + void *ctx; +}; + +/* General event handlers for the low-level mlx5_core driver + * + * Other major feature-specific events such as + * clock/eswitch/fpga/FW trace and many others are handled elsewhere, with + * separate notifier callbacks, specifically by those mlx5 components.
+ */ +static int any_notifier(struct notifier_block *, unsigned long, void *); +static int temp_warn(struct notifier_block *, unsigned long, void *); +static int port_module(struct notifier_block *, unsigned long, void *); + +/* handler which forwards the event to events->nh, driver notifiers */ +static int forward_event(struct notifier_block *, unsigned long, void *); + +static struct mlx5_nb events_nbs_ref[] = { + /* Events to be processed by mlx5_core */ + {.nb.notifier_call = any_notifier, .event_type = MLX5_EVENT_TYPE_NOTIFY_ANY }, + {.nb.notifier_call = temp_warn, .event_type = MLX5_EVENT_TYPE_TEMP_WARN_EVENT }, + {.nb.notifier_call = port_module, .event_type = MLX5_EVENT_TYPE_PORT_MODULE_EVENT }, + + /* Events to be forwarded (as is) to mlx5 core interfaces (mlx5e/mlx5_ib) */ + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PORT_CHANGE }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_GENERAL_EVENT }, + /* QP/WQ resource events to forward */ + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_DCT_DRAINED }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_COMM_EST }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SQ_DRAINED }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_LAST_WQE }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_CATAS_ERROR }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_PATH_MIG_FAILED }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_WQ_ACCESS_ERROR }, + /* SRQ events */ + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_CATAS_ERROR }, + {.nb.notifier_call = forward_event, .event_type = MLX5_EVENT_TYPE_SRQ_RQ_LIMIT }, +}; + +struct mlx5_events { + struct mlx5_core_dev *dev; + struct mlx5_event_nb notifiers[ARRAY_SIZE(events_nbs_ref)]; + /* driver notifier chain */ + struct atomic_notifier_head nh; + /* port module events stats */ + struct mlx5_pme_stats pme_stats; +}; + +static const char *eqe_type_str(u8 type) +{ + switch (type) { + case MLX5_EVENT_TYPE_COMP: + return "MLX5_EVENT_TYPE_COMP"; + case MLX5_EVENT_TYPE_PATH_MIG: + return "MLX5_EVENT_TYPE_PATH_MIG"; + case MLX5_EVENT_TYPE_COMM_EST: + return "MLX5_EVENT_TYPE_COMM_EST"; + case MLX5_EVENT_TYPE_SQ_DRAINED: + return "MLX5_EVENT_TYPE_SQ_DRAINED"; + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + return "MLX5_EVENT_TYPE_SRQ_LAST_WQE"; + case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT: + return "MLX5_EVENT_TYPE_SRQ_RQ_LIMIT"; + case MLX5_EVENT_TYPE_CQ_ERROR: + return "MLX5_EVENT_TYPE_CQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_WQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + return "MLX5_EVENT_TYPE_PATH_MIG_FAILED"; + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + return "MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR"; + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + return "MLX5_EVENT_TYPE_WQ_ACCESS_ERROR"; + case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR: + return "MLX5_EVENT_TYPE_SRQ_CATAS_ERROR"; + case MLX5_EVENT_TYPE_INTERNAL_ERROR: + return "MLX5_EVENT_TYPE_INTERNAL_ERROR"; + case MLX5_EVENT_TYPE_PORT_CHANGE: + return "MLX5_EVENT_TYPE_PORT_CHANGE"; + case MLX5_EVENT_TYPE_GPIO_EVENT: + return "MLX5_EVENT_TYPE_GPIO_EVENT"; + case MLX5_EVENT_TYPE_PORT_MODULE_EVENT: + return "MLX5_EVENT_TYPE_PORT_MODULE_EVENT"; + case
MLX5_EVENT_TYPE_TEMP_WARN_EVENT: + return "MLX5_EVENT_TYPE_TEMP_WARN_EVENT"; + case MLX5_EVENT_TYPE_REMOTE_CONFIG: + return "MLX5_EVENT_TYPE_REMOTE_CONFIG"; + case MLX5_EVENT_TYPE_DB_BF_CONGESTION: + return "MLX5_EVENT_TYPE_DB_BF_CONGESTION"; + case MLX5_EVENT_TYPE_STALL_EVENT: + return "MLX5_EVENT_TYPE_STALL_EVENT"; + case MLX5_EVENT_TYPE_CMD: + return "MLX5_EVENT_TYPE_CMD"; + case MLX5_EVENT_TYPE_PAGE_REQUEST: + return "MLX5_EVENT_TYPE_PAGE_REQUEST"; + case MLX5_EVENT_TYPE_PAGE_FAULT: + return "MLX5_EVENT_TYPE_PAGE_FAULT"; + case MLX5_EVENT_TYPE_PPS_EVENT: + return "MLX5_EVENT_TYPE_PPS_EVENT"; + case MLX5_EVENT_TYPE_NIC_VPORT_CHANGE: + return "MLX5_EVENT_TYPE_NIC_VPORT_CHANGE"; + case MLX5_EVENT_TYPE_FPGA_ERROR: + return "MLX5_EVENT_TYPE_FPGA_ERROR"; + case MLX5_EVENT_TYPE_FPGA_QP_ERROR: + return "MLX5_EVENT_TYPE_FPGA_QP_ERROR"; + case MLX5_EVENT_TYPE_GENERAL_EVENT: + return "MLX5_EVENT_TYPE_GENERAL_EVENT"; + case MLX5_EVENT_TYPE_MONITOR_COUNTER: + return "MLX5_EVENT_TYPE_MONITOR_COUNTER"; + case MLX5_EVENT_TYPE_DEVICE_TRACER: + return "MLX5_EVENT_TYPE_DEVICE_TRACER"; + default: + return "Unrecognized event"; + } +} + +/* handles all FW events, type == eqe->type */ +static int any_notifier(struct notifier_block *nb, + unsigned long type, void *data) +{ + struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb); + struct mlx5_events *events = event_nb->ctx; + struct mlx5_eqe *eqe = data; + + mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d)\n", + eqe_type_str(eqe->type), eqe->sub_type); + return NOTIFY_OK; +} + +/* type == MLX5_EVENT_TYPE_TEMP_WARN_EVENT */ +static int temp_warn(struct notifier_block *nb, unsigned long type, void *data) +{ + struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb); + struct mlx5_events *events = event_nb->ctx; + struct mlx5_eqe *eqe = data; + u64 value_lsb; + u64 value_msb; + + value_lsb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_lsb); + value_msb = be64_to_cpu(eqe->data.temp_warning.sensor_warning_msb); + + mlx5_core_warn(events->dev, + "High temperature on sensors with bit set %llx %llx", + value_msb, value_lsb); + + return NOTIFY_OK; +} + +/* MLX5_EVENT_TYPE_PORT_MODULE_EVENT */ +static const char *mlx5_pme_status_to_string(enum port_module_event_status_type status) +{ + switch (status) { + case MLX5_MODULE_STATUS_PLUGGED: + return "Cable plugged"; + case MLX5_MODULE_STATUS_UNPLUGGED: + return "Cable unplugged"; + case MLX5_MODULE_STATUS_ERROR: + return "Cable error"; + case MLX5_MODULE_STATUS_DISABLED: + return "Cable disabled"; + default: + return "Unknown status"; + } +} + +static const char *mlx5_pme_error_to_string(enum port_module_event_error_type error) +{ + switch (error) { + case MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED: + return "Power budget exceeded"; + case MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX: + return "Long Range for non MLNX cable"; + case MLX5_MODULE_EVENT_ERROR_BUS_STUCK: + return "Bus stuck (I2C or data shorted)"; + case MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT: + return "No EEPROM/retry timeout"; + case MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST: + return "Enforce part number list"; + case MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER: + return "Unknown identifier"; + case MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE: + return "High Temperature"; + case MLX5_MODULE_EVENT_ERROR_BAD_CABLE: + return "Bad or shorted cable/module"; + case MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED: + return "One or more network ports have been powered down due to 
insufficient/unadvertised power on the PCIe slot"; + default: + return "Unknown error"; + } +} + +/* type == MLX5_EVENT_TYPE_PORT_MODULE_EVENT */ +static int port_module(struct notifier_block *nb, unsigned long type, void *data) +{ + struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb); + struct mlx5_events *events = event_nb->ctx; + struct mlx5_eqe *eqe = data; + + enum port_module_event_status_type module_status; + enum port_module_event_error_type error_type; + struct mlx5_eqe_port_module *module_event_eqe; + const char *status_str, *error_str; + u8 module_num; + + module_event_eqe = &eqe->data.port_module; + module_num = module_event_eqe->module; + module_status = module_event_eqe->module_status & + PORT_MODULE_EVENT_MODULE_STATUS_MASK; + error_type = module_event_eqe->error_type & + PORT_MODULE_EVENT_ERROR_TYPE_MASK; + + if (module_status < MLX5_MODULE_STATUS_NUM) + events->pme_stats.status_counters[module_status]++; + status_str = mlx5_pme_status_to_string(module_status); + + if (module_status == MLX5_MODULE_STATUS_ERROR) { + if (error_type < MLX5_MODULE_EVENT_ERROR_NUM) + events->pme_stats.error_counters[error_type]++; + error_str = mlx5_pme_error_to_string(error_type); + } + + if (!printk_ratelimit()) + return NOTIFY_OK; + + if (module_status == MLX5_MODULE_STATUS_ERROR) + mlx5_core_err(events->dev, + "Port module event[error]: module %u, %s, %s\n", + module_num, status_str, error_str); + else + mlx5_core_info(events->dev, + "Port module event: module %u, %s\n", + module_num, status_str); + + return NOTIFY_OK; +} + +void mlx5_get_pme_stats(struct mlx5_core_dev *dev, struct mlx5_pme_stats *stats) +{ + *stats = dev->priv.events->pme_stats; +} + +/* forward event as is to registered interfaces (mlx5e/mlx5_ib) */ +static int forward_event(struct notifier_block *nb, unsigned long event, void *data) +{ + struct mlx5_event_nb *event_nb = mlx5_nb_cof(nb, struct mlx5_event_nb, nb); + struct mlx5_events *events = event_nb->ctx; + struct mlx5_eqe *eqe = data; + + mlx5_core_dbg(events->dev, "Async eqe type %s, subtype (%d) forward to interfaces\n", + eqe_type_str(eqe->type), eqe->sub_type); + atomic_notifier_call_chain(&events->nh, event, data); + return NOTIFY_OK; +} + +int mlx5_events_init(struct mlx5_core_dev *dev) +{ + struct mlx5_events *events = kzalloc(sizeof(*events), GFP_KERNEL); + + if (!events) + return -ENOMEM; + + ATOMIC_INIT_NOTIFIER_HEAD(&events->nh); + events->dev = dev; + dev->priv.events = events; + return 0; +} + +void mlx5_events_cleanup(struct mlx5_core_dev *dev) +{ + kvfree(dev->priv.events); +} + +void mlx5_events_start(struct mlx5_core_dev *dev) +{ + struct mlx5_events *events = dev->priv.events; + int i; + + for (i = 0; i < ARRAY_SIZE(events_nbs_ref); i++) { + events->notifiers[i].nb = events_nbs_ref[i]; + events->notifiers[i].ctx = events; + mlx5_eq_notifier_register(dev, &events->notifiers[i].nb); + } +} + +void mlx5_events_stop(struct mlx5_core_dev *dev) +{ + struct mlx5_events *events = dev->priv.events; + int i; + + for (i = ARRAY_SIZE(events_nbs_ref) - 1; i >= 0 ; i--) + mlx5_eq_notifier_unregister(dev, &events->notifiers[i].nb); +} + +int mlx5_notifier_register(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + struct mlx5_events *events = dev->priv.events; + + return atomic_notifier_chain_register(&events->nh, nb); +} +EXPORT_SYMBOL(mlx5_notifier_register); + +int mlx5_notifier_unregister(struct mlx5_core_dev *dev, struct notifier_block *nb) +{ + struct mlx5_events *events = dev->priv.events; + + return 
atomic_notifier_chain_unregister(&events->nh, nb); +} +EXPORT_SYMBOL(mlx5_notifier_unregister); + +int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data) +{ + return atomic_notifier_call_chain(&events->nh, event, data); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c index 8ca1d1949d93..873541ef4c1b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/conn.c @@ -334,7 +334,7 @@ static void mlx5_fpga_conn_handle_cqe(struct mlx5_fpga_conn *conn, { u8 opcode, status = 0; - opcode = cqe->op_own >> 4; + opcode = get_cqe_opcode(cqe); switch (opcode) { case MLX5_CQE_REQ_ERR: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c index 436a8136f26f..27c5f6c7d36a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.c @@ -36,6 +36,7 @@ #include "mlx5_core.h" #include "lib/mlx5.h" +#include "lib/eq.h" #include "fpga/core.h" #include "fpga/conn.h" @@ -145,6 +146,22 @@ static int mlx5_fpga_device_brb(struct mlx5_fpga_device *fdev) return 0; } +static int mlx5_fpga_event(struct mlx5_fpga_device *, unsigned long, void *); + +static int fpga_err_event(struct notifier_block *nb, unsigned long event, void *eqe) +{ + struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_err_nb); + + return mlx5_fpga_event(fdev, event, eqe); +} + +static int fpga_qp_err_event(struct notifier_block *nb, unsigned long event, void *eqe) +{ + struct mlx5_fpga_device *fdev = mlx5_nb_cof(nb, struct mlx5_fpga_device, fpga_qp_err_nb); + + return mlx5_fpga_event(fdev, event, eqe); +} + int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) { struct mlx5_fpga_device *fdev = mdev->fpga; @@ -185,6 +202,11 @@ int mlx5_fpga_device_start(struct mlx5_core_dev *mdev) if (err) goto out; + MLX5_NB_INIT(&fdev->fpga_err_nb, fpga_err_event, FPGA_ERROR); + MLX5_NB_INIT(&fdev->fpga_qp_err_nb, fpga_qp_err_event, FPGA_QP_ERROR); + mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_err_nb); + mlx5_eq_notifier_register(fdev->mdev, &fdev->fpga_qp_err_nb); + err = mlx5_fpga_conn_device_init(fdev); if (err) goto err_rsvd_gid; @@ -201,6 +223,8 @@ err_conn_init: mlx5_fpga_conn_device_cleanup(fdev); err_rsvd_gid: + mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb); + mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb); mlx5_core_unreserve_gids(mdev, max_num_qps); out: spin_lock_irqsave(&fdev->state_lock, flags); @@ -256,6 +280,9 @@ void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev) } mlx5_fpga_conn_device_cleanup(fdev); + mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_err_nb); + mlx5_eq_notifier_unregister(fdev->mdev, &fdev->fpga_qp_err_nb); + max_num_qps = MLX5_CAP_FPGA(mdev, shell_caps.max_num_qps); mlx5_core_unreserve_gids(mdev, max_num_qps); } @@ -283,9 +310,10 @@ static const char *mlx5_fpga_qp_syndrome_to_string(u8 syndrome) return "Unknown"; } -void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) +static int mlx5_fpga_event(struct mlx5_fpga_device *fdev, + unsigned long event, void *eqe) { - struct mlx5_fpga_device *fdev = mdev->fpga; + void *data = ((struct mlx5_eqe *)eqe)->data.raw; const char *event_name; bool teardown = false; unsigned long flags; @@ -303,9 +331,7 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) fpga_qpn = MLX5_GET(fpga_qp_error_event, 
data, fpga_qpn); break; default: - mlx5_fpga_warn_ratelimited(fdev, "Unexpected event %u\n", - event); - return; + return NOTIFY_DONE; } spin_lock_irqsave(&fdev->state_lock, flags); @@ -326,4 +352,6 @@ void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data) */ if (teardown) mlx5_trigger_health_work(fdev->mdev); + + return NOTIFY_OK; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h index 3e2355c8df3f..7e2e871dbf83 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fpga/core.h @@ -35,11 +35,16 @@ #ifdef CONFIG_MLX5_FPGA +#include <linux/mlx5/eq.h> + +#include "lib/eq.h" #include "fpga/cmd.h" /* Represents an Innova device */ struct mlx5_fpga_device { struct mlx5_core_dev *mdev; + struct mlx5_nb fpga_err_nb; + struct mlx5_nb fpga_qp_err_nb; spinlock_t state_lock; /* Protects state transitions */ enum mlx5_fpga_status state; enum mlx5_fpga_image last_admin_image; @@ -82,7 +87,6 @@ int mlx5_fpga_init(struct mlx5_core_dev *mdev); void mlx5_fpga_cleanup(struct mlx5_core_dev *mdev); int mlx5_fpga_device_start(struct mlx5_core_dev *mdev); void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev); -void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, void *data); #else @@ -104,11 +108,6 @@ static inline void mlx5_fpga_device_stop(struct mlx5_core_dev *mdev) { } -static inline void mlx5_fpga_event(struct mlx5_core_dev *mdev, u8 event, - void *data) -{ -} - #endif #endif /* __MLX5_FPGA_CORE_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index 08a891f9aade..c44ccb67c4a3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -308,22 +308,68 @@ static int mlx5_cmd_destroy_flow_group(struct mlx5_core_dev *dev, return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } +static int mlx5_set_extended_dest(struct mlx5_core_dev *dev, + struct fs_fte *fte, bool *extended_dest) +{ + int fw_log_max_fdb_encap_uplink = + MLX5_CAP_ESW(dev, log_max_fdb_encap_uplink); + int num_fwd_destinations = 0; + struct mlx5_flow_rule *dst; + int num_encap = 0; + + *extended_dest = false; + if (!(fte->action.action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST)) + return 0; + + list_for_each_entry(dst, &fte->node.children, node.list) { + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_COUNTER) + continue; + if (dst->dest_attr.type == MLX5_FLOW_DESTINATION_TYPE_VPORT && + dst->dest_attr.vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) + num_encap++; + num_fwd_destinations++; + } + if (num_fwd_destinations > 1 && num_encap > 0) + *extended_dest = true; + + if (*extended_dest && !fw_log_max_fdb_encap_uplink) { + mlx5_core_warn(dev, "FW does not support extended destination"); + return -EOPNOTSUPP; + } + if (num_encap > (1 << fw_log_max_fdb_encap_uplink)) { + mlx5_core_warn(dev, "FW does not support more than %d encaps", + 1 << fw_log_max_fdb_encap_uplink); + return -EOPNOTSUPP; + } + + return 0; +} static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, int opmod, int modify_mask, struct mlx5_flow_table *ft, unsigned group_id, struct fs_fte *fte) { - unsigned int inlen = MLX5_ST_SZ_BYTES(set_fte_in) + - fte->dests_size * MLX5_ST_SZ_BYTES(dest_format_struct); u32 out[MLX5_ST_SZ_DW(set_fte_out)] = {0}; + bool extended_dest = false; struct mlx5_flow_rule *dst; void *in_flow_context, *vlan; void *in_match_value; + unsigned int inlen; + int dst_cnt_size; 
void *in_dests; u32 *in; int err; + if (mlx5_set_extended_dest(dev, fte, &extended_dest)) + return -EOPNOTSUPP; + + if (!extended_dest) + dst_cnt_size = MLX5_ST_SZ_BYTES(dest_format_struct); + else + dst_cnt_size = MLX5_ST_SZ_BYTES(extended_dest_format); + + inlen = MLX5_ST_SZ_BYTES(set_fte_in) + fte->dests_size * dst_cnt_size; in = kvzalloc(inlen, GFP_KERNEL); if (!in) return -ENOMEM; @@ -343,9 +389,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_context, in_flow_context, group_id, group_id); MLX5_SET(flow_context, in_flow_context, flow_tag, fte->action.flow_tag); - MLX5_SET(flow_context, in_flow_context, action, fte->action.action); - MLX5_SET(flow_context, in_flow_context, packet_reformat_id, - fte->action.reformat_id); + MLX5_SET(flow_context, in_flow_context, extended_destination, + extended_dest); + if (extended_dest) { + u32 action; + + action = fte->action.action & + ~MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT; + MLX5_SET(flow_context, in_flow_context, action, action); + } else { + MLX5_SET(flow_context, in_flow_context, action, + fte->action.action); + MLX5_SET(flow_context, in_flow_context, packet_reformat_id, + fte->action.reformat_id); + } MLX5_SET(flow_context, in_flow_context, modify_header_id, fte->action.modify_id); @@ -387,10 +444,20 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, id = dst->dest_attr.vport.num; MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id_valid, - dst->dest_attr.vport.vhca_id_valid); + !!(dst->dest_attr.vport.flags & + MLX5_FLOW_DEST_VPORT_VHCA_ID)); MLX5_SET(dest_format_struct, in_dests, destination_eswitch_owner_vhca_id, dst->dest_attr.vport.vhca_id); + if (extended_dest) { + MLX5_SET(dest_format_struct, in_dests, + packet_reformat, + !!(dst->dest_attr.vport.flags & + MLX5_FLOW_DEST_VPORT_REFORMAT_ID)); + MLX5_SET(extended_dest_format, in_dests, + packet_reformat_id, + dst->dest_attr.vport.reformat_id); + } break; default: id = dst->dest_attr.tir_num; @@ -399,7 +466,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(dest_format_struct, in_dests, destination_type, type); MLX5_SET(dest_format_struct, in_dests, destination_id, id); - in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + in_dests += dst_cnt_size; list_size++; } @@ -420,7 +487,7 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev, MLX5_SET(flow_counter_list, in_dests, flow_counter_id, dst->dest_attr.counter_id); - in_dests += MLX5_ST_SZ_BYTES(dest_format_struct); + in_dests += dst_cnt_size; list_size++; } if (list_size > max_list_size) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 08233cf44871..79f122b45def 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -1373,7 +1373,10 @@ static bool mlx5_flow_dests_cmp(struct mlx5_flow_destination *d1, { if (d1->type == d2->type) { if ((d1->type == MLX5_FLOW_DESTINATION_TYPE_VPORT && - d1->vport.num == d2->vport.num) || + d1->vport.num == d2->vport.num && + d1->vport.flags == d2->vport.flags && + ((d1->vport.flags & MLX5_FLOW_DEST_VPORT_REFORMAT_ID) ? 
+ (d1->vport.reformat_id == d2->vport.reformat_id) : true)) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE && d1->ft == d2->ft) || (d1->type == MLX5_FLOW_DESTINATION_TYPE_TIR && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h index b51ad217da32..2dc86347af58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.h @@ -145,29 +145,6 @@ struct mlx5_flow_table { struct rhltable fgs_hash; }; -struct mlx5_fc_cache { - u64 packets; - u64 bytes; - u64 lastuse; -}; - -struct mlx5_fc { - struct list_head list; - struct llist_node addlist; - struct llist_node dellist; - - /* last{packets,bytes} members are used when calculating the delta since - * last reading - */ - u64 lastpackets; - u64 lastbytes; - - u32 id; - bool aging; - - struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; -}; - struct mlx5_ft_underlay_qp { struct list_head list; u32 qpn; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c index 32accd6b041b..c6c28f56aa29 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_counters.c @@ -41,6 +41,29 @@ /* Max number of counters to query in bulk read is 32K */ #define MLX5_SW_MAX_COUNTERS_BULK BIT(15) +struct mlx5_fc_cache { + u64 packets; + u64 bytes; + u64 lastuse; +}; + +struct mlx5_fc { + struct list_head list; + struct llist_node addlist; + struct llist_node dellist; + + /* last{packets,bytes} members are used when calculating the delta since + * last reading + */ + u64 lastpackets; + u64 lastbytes; + + u32 id; + bool aging; + + struct mlx5_fc_cache cache ____cacheline_aligned_in_smp; +}; + /* locking scheme: * * It is the responsibility of the user to prevent concurrent calls or bad diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c index 43118de8ee99..196c07383082 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/health.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c @@ -38,6 +38,8 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" +#include "lib/eq.h" +#include "lib/mlx5.h" enum { MLX5_HEALTH_POLL_INTERVAL = 2 * HZ, @@ -78,29 +80,6 @@ void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state) &dev->iseg->cmdq_addr_l_sz); } -static void trigger_cmd_completions(struct mlx5_core_dev *dev) -{ - unsigned long flags; - u64 vector; - - /* wait for pending handlers to complete */ - synchronize_irq(pci_irq_vector(dev->pdev, MLX5_EQ_VEC_CMD)); - spin_lock_irqsave(&dev->cmd.alloc_lock, flags); - vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1); - if (!vector) - goto no_trig; - - vector |= MLX5_TRIGGERED_CMD_COMP; - spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); - - mlx5_core_dbg(dev, "vector 0x%llx\n", vector); - mlx5_cmd_comp_handler(dev, vector, true); - return; - -no_trig: - spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags); -} - static int in_fatal(struct mlx5_core_dev *dev) { struct mlx5_core_health *health = &dev->priv.health; @@ -124,10 +103,10 @@ void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force) mlx5_core_err(dev, "start\n"); if (pci_channel_offline(dev->pdev) || in_fatal(dev) || force) { dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR; - trigger_cmd_completions(dev); + mlx5_cmd_trigger_completions(dev); } - mlx5_core_event(dev, MLX5_DEV_EVENT_SYS_ERROR, 1); + 
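The fs_cmd.c hunks above make the per-destination entry size depend on whether the FTE needs extended destinations (more than one forward destination, at least one carrying a per-vport packet reformat). A sketch of the resulting inbox sizing, with dests standing in for fte->dests_size:

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* Illustration of the set_fte inbox sizing introduced above. */
static unsigned int set_fte_inlen(unsigned int dests, bool extended_dest)
{
	/* Extended entries carry a per-destination packet_reformat_id. */
	unsigned int entry = extended_dest ?
			     MLX5_ST_SZ_BYTES(extended_dest_format) :
			     MLX5_ST_SZ_BYTES(dest_format_struct);

	return MLX5_ST_SZ_BYTES(set_fte_in) + dests * entry;
}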
mlx5_notifier_call_chain(dev->priv.events, MLX5_DEV_EVENT_SYS_ERROR, (void *)1); mlx5_core_err(dev, "end\n"); unlock: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index 11dabd62e2c7..bfc0f6581729 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -87,7 +87,7 @@ int mlx5i_init(struct mlx5_core_dev *mdev, mlx5_query_port_max_mtu(mdev, &max_mtu, 1); netdev->mtu = max_mtu; - mlx5e_build_nic_params(mdev, &priv->channels.params, + mlx5e_build_nic_params(mdev, &priv->rss_params, &priv->channels.params, mlx5e_get_netdev_max_channels(netdev), netdev->mtu); mlx5i_build_nic_params(mdev, &priv->channels.params); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c index 582b2f18010a..3a6baed722d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c @@ -34,11 +34,15 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/vport.h> #include "mlx5_core.h" +#include "eswitch.h" enum { - MLX5_LAG_FLAG_BONDED = 1 << 0, + MLX5_LAG_FLAG_ROCE = 1 << 0, + MLX5_LAG_FLAG_SRIOV = 1 << 1, }; +#define MLX5_LAG_MODE_FLAGS (MLX5_LAG_FLAG_ROCE | MLX5_LAG_FLAG_SRIOV) + struct lag_func { struct mlx5_core_dev *dev; struct net_device *netdev; @@ -61,11 +65,6 @@ struct mlx5_lag { struct lag_tracker tracker; struct delayed_work bond_work; struct notifier_block nb; - - /* Admin state. Allow lag only if allowed is true - * even if network conditions for lag were met - */ - bool allowed; }; /* General purpose, use for short periods of time. @@ -165,9 +164,19 @@ static int mlx5_lag_dev_get_netdev_idx(struct mlx5_lag *ldev, return -1; } -static bool mlx5_lag_is_bonded(struct mlx5_lag *ldev) +static bool __mlx5_lag_is_roce(struct mlx5_lag *ldev) +{ + return !!(ldev->flags & MLX5_LAG_FLAG_ROCE); +} + +static bool __mlx5_lag_is_sriov(struct mlx5_lag *ldev) +{ + return !!(ldev->flags & MLX5_LAG_FLAG_SRIOV); +} + +static bool __mlx5_lag_is_active(struct mlx5_lag *ldev) { - return !!(ldev->flags & MLX5_LAG_FLAG_BONDED); + return !!(ldev->flags & MLX5_LAG_MODE_FLAGS); } static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, @@ -186,36 +195,131 @@ static void mlx5_infer_tx_affinity_mapping(struct lag_tracker *tracker, *port2 = 1; } -static void mlx5_activate_lag(struct mlx5_lag *ldev, - struct lag_tracker *tracker) +static void mlx5_modify_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) { struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + u8 v2p_port1, v2p_port2; int err; - ldev->flags |= MLX5_LAG_FLAG_BONDED; + mlx5_infer_tx_affinity_mapping(tracker, &v2p_port1, + &v2p_port2); + + if (v2p_port1 != ldev->v2p_map[0] || + v2p_port2 != ldev->v2p_map[1]) { + ldev->v2p_map[0] = v2p_port1; + ldev->v2p_map[1] = v2p_port2; + + mlx5_core_info(dev0, "modify lag map port 1:%d port 2:%d", + ldev->v2p_map[0], ldev->v2p_map[1]); + + err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); + if (err) + mlx5_core_err(dev0, + "Failed to modify LAG (%d)\n", + err); + } +} + +static int mlx5_create_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker) +{ + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + int err; mlx5_infer_tx_affinity_mapping(tracker, &ldev->v2p_map[0], &ldev->v2p_map[1]); + mlx5_core_info(dev0, "lag map port 1:%d port 2:%d", + ldev->v2p_map[0], ldev->v2p_map[1]); + err = mlx5_cmd_create_lag(dev0, ldev->v2p_map[0], ldev->v2p_map[1]); if (err) 
mlx5_core_err(dev0, "Failed to create LAG (%d)\n", err); + return err; +} + +static int mlx5_activate_lag(struct mlx5_lag *ldev, + struct lag_tracker *tracker, + u8 flags) +{ + bool roce_lag = !!(flags & MLX5_LAG_FLAG_ROCE); + struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + int err; + + err = mlx5_create_lag(ldev, tracker); + if (err) { + if (roce_lag) { + mlx5_core_err(dev0, + "Failed to activate RoCE LAG\n"); + } else { + mlx5_core_err(dev0, + "Failed to activate VF LAG\n" + "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n"); + } + return err; + } + + ldev->flags |= flags; + return 0; } -static void mlx5_deactivate_lag(struct mlx5_lag *ldev) +static int mlx5_deactivate_lag(struct mlx5_lag *ldev) { struct mlx5_core_dev *dev0 = ldev->pf[0].dev; + bool roce_lag = __mlx5_lag_is_roce(ldev); int err; - ldev->flags &= ~MLX5_LAG_FLAG_BONDED; + ldev->flags &= ~MLX5_LAG_MODE_FLAGS; err = mlx5_cmd_destroy_lag(dev0); - if (err) - mlx5_core_err(dev0, - "Failed to destroy LAG (%d)\n", - err); + if (err) { + if (roce_lag) { + mlx5_core_err(dev0, + "Failed to deactivate RoCE LAG; driver restart required\n"); + } else { + mlx5_core_err(dev0, + "Failed to deactivate VF LAG; driver restart required\n" + "Make sure all VFs are unbound prior to VF LAG activation or deactivation\n"); + } + } + + return err; +} + +static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) +{ + if (!ldev->pf[0].dev || !ldev->pf[1].dev) + return false; + +#ifdef CONFIG_MLX5_ESWITCH + return mlx5_esw_lag_prereq(ldev->pf[0].dev, ldev->pf[1].dev); +#else + return (!mlx5_sriov_is_enabled(ldev->pf[0].dev) && + !mlx5_sriov_is_enabled(ldev->pf[1].dev)); +#endif +} + +static void mlx5_lag_add_ib_devices(struct mlx5_lag *ldev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + mlx5_add_dev_by_protocol(ldev->pf[i].dev, + MLX5_INTERFACE_PROTOCOL_IB); +} + +static void mlx5_lag_remove_ib_devices(struct mlx5_lag *ldev) +{ + int i; + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (ldev->pf[i].dev) + mlx5_remove_dev_by_protocol(ldev->pf[i].dev, + MLX5_INTERFACE_PROTOCOL_IB); } static void mlx5_do_bond(struct mlx5_lag *ldev) @@ -223,9 +327,8 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) struct mlx5_core_dev *dev0 = ldev->pf[0].dev; struct mlx5_core_dev *dev1 = ldev->pf[1].dev; struct lag_tracker tracker; - u8 v2p_port1, v2p_port2; - int i, err; - bool do_bond; + bool do_bond, roce_lag; + int err; if (!dev0 || !dev1) return; @@ -234,42 +337,45 @@ static void mlx5_do_bond(struct mlx5_lag *ldev) tracker = ldev->tracker; mutex_unlock(&lag_mutex); - do_bond = tracker.is_bonded && ldev->allowed; + do_bond = tracker.is_bonded && mlx5_lag_check_prereq(ldev); - if (do_bond && !mlx5_lag_is_bonded(ldev)) { - for (i = 0; i < MLX5_MAX_PORTS; i++) - mlx5_remove_dev_by_protocol(ldev->pf[i].dev, - MLX5_INTERFACE_PROTOCOL_IB); + if (do_bond && !__mlx5_lag_is_active(ldev)) { + roce_lag = !mlx5_sriov_is_enabled(dev0) && + !mlx5_sriov_is_enabled(dev1); - mlx5_activate_lag(ldev, &tracker); + if (roce_lag) + mlx5_lag_remove_ib_devices(ldev); - mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_nic_vport_enable_roce(dev1); - } else if (do_bond && mlx5_lag_is_bonded(ldev)) { - mlx5_infer_tx_affinity_mapping(&tracker, &v2p_port1, - &v2p_port2); + err = mlx5_activate_lag(ldev, &tracker, + roce_lag ? 
MLX5_LAG_FLAG_ROCE : + MLX5_LAG_FLAG_SRIOV); + if (err) { + if (roce_lag) + mlx5_lag_add_ib_devices(ldev); - if ((v2p_port1 != ldev->v2p_map[0]) || - (v2p_port2 != ldev->v2p_map[1])) { - ldev->v2p_map[0] = v2p_port1; - ldev->v2p_map[1] = v2p_port2; + return; + } - err = mlx5_cmd_modify_lag(dev0, v2p_port1, v2p_port2); - if (err) - mlx5_core_err(dev0, - "Failed to modify LAG (%d)\n", - err); + if (roce_lag) { + mlx5_add_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_nic_vport_enable_roce(dev1); + } + } else if (do_bond && __mlx5_lag_is_active(ldev)) { + mlx5_modify_lag(ldev, &tracker); + } else if (!do_bond && __mlx5_lag_is_active(ldev)) { + roce_lag = __mlx5_lag_is_roce(ldev); + + if (roce_lag) { + mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); + mlx5_nic_vport_disable_roce(dev1); } - } else if (!do_bond && mlx5_lag_is_bonded(ldev)) { - mlx5_remove_dev_by_protocol(dev0, MLX5_INTERFACE_PROTOCOL_IB); - mlx5_nic_vport_disable_roce(dev1); - mlx5_deactivate_lag(ldev); + err = mlx5_deactivate_lag(ldev); + if (err) + return; - for (i = 0; i < MLX5_MAX_PORTS; i++) - if (ldev->pf[i].dev) - mlx5_add_dev_by_protocol(ldev->pf[i].dev, - MLX5_INTERFACE_PROTOCOL_IB); + if (roce_lag) + mlx5_lag_add_ib_devices(ldev); } } @@ -419,15 +525,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this, return NOTIFY_DONE; } -static bool mlx5_lag_check_prereq(struct mlx5_lag *ldev) -{ - if ((ldev->pf[0].dev && mlx5_sriov_is_enabled(ldev->pf[0].dev)) || - (ldev->pf[1].dev && mlx5_sriov_is_enabled(ldev->pf[1].dev))) - return false; - else - return true; -} - static struct mlx5_lag *mlx5_lag_dev_alloc(void) { struct mlx5_lag *ldev; @@ -437,7 +534,6 @@ static struct mlx5_lag *mlx5_lag_dev_alloc(void) return NULL; INIT_DELAYED_WORK(&ldev->bond_work, mlx5_do_bond_work); - ldev->allowed = mlx5_lag_check_prereq(ldev); return ldev; } @@ -462,7 +558,6 @@ static void mlx5_lag_dev_add_pf(struct mlx5_lag *ldev, ldev->tracker.netdev_state[fn].link_up = 0; ldev->tracker.netdev_state[fn].tx_enabled = 0; - ldev->allowed = mlx5_lag_check_prereq(ldev); dev->priv.lag = ldev; mutex_unlock(&lag_mutex); @@ -484,7 +579,6 @@ static void mlx5_lag_dev_remove_pf(struct mlx5_lag *ldev, memset(&ldev->pf[i], 0, sizeof(*ldev->pf)); dev->priv.lag = NULL; - ldev->allowed = mlx5_lag_check_prereq(ldev); mutex_unlock(&lag_mutex); } @@ -532,7 +626,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev) if (!ldev) return; - if (mlx5_lag_is_bonded(ldev)) + if (__mlx5_lag_is_active(ldev)) mlx5_deactivate_lag(ldev); mlx5_lag_dev_remove_pf(ldev, dev); @@ -549,56 +643,61 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev) } } -bool mlx5_lag_is_active(struct mlx5_core_dev *dev) +bool mlx5_lag_is_roce(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; bool res; mutex_lock(&lag_mutex); ldev = mlx5_lag_dev_get(dev); - res = ldev && mlx5_lag_is_bonded(ldev); + res = ldev && __mlx5_lag_is_roce(ldev); mutex_unlock(&lag_mutex); return res; } -EXPORT_SYMBOL(mlx5_lag_is_active); +EXPORT_SYMBOL(mlx5_lag_is_roce); -static int mlx5_lag_set_state(struct mlx5_core_dev *dev, bool allow) +bool mlx5_lag_is_active(struct mlx5_core_dev *dev) { struct mlx5_lag *ldev; - int ret = 0; - bool lag_active; - - mlx5_dev_list_lock(); + bool res; + mutex_lock(&lag_mutex); ldev = mlx5_lag_dev_get(dev); - if (!ldev) { - ret = -ENODEV; - goto unlock; - } - lag_active = mlx5_lag_is_bonded(ldev); - if (!mlx5_lag_check_prereq(ldev) && allow) { - ret = -EINVAL; - goto unlock; - } - if (ldev->allowed == allow) - goto unlock; - ldev->allowed = allow; - if 
((lag_active && !allow) || allow) - mlx5_do_bond(ldev); -unlock: - mlx5_dev_list_unlock(); - return ret; + res = ldev && __mlx5_lag_is_active(ldev); + mutex_unlock(&lag_mutex); + + return res; } +EXPORT_SYMBOL(mlx5_lag_is_active); -int mlx5_lag_forbid(struct mlx5_core_dev *dev) +bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev) { - return mlx5_lag_set_state(dev, false); + struct mlx5_lag *ldev; + bool res; + + mutex_lock(&lag_mutex); + ldev = mlx5_lag_dev_get(dev); + res = ldev && __mlx5_lag_is_sriov(ldev); + mutex_unlock(&lag_mutex); + + return res; } +EXPORT_SYMBOL(mlx5_lag_is_sriov); -int mlx5_lag_allow(struct mlx5_core_dev *dev) +void mlx5_lag_update(struct mlx5_core_dev *dev) { - return mlx5_lag_set_state(dev, true); + struct mlx5_lag *ldev; + + mlx5_dev_list_lock(); + ldev = mlx5_lag_dev_get(dev); + if (!ldev) + goto unlock; + + mlx5_do_bond(ldev); + +unlock: + mlx5_dev_list_unlock(); } struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) @@ -609,7 +708,7 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev) mutex_lock(&lag_mutex); ldev = mlx5_lag_dev_get(dev); - if (!(ldev && mlx5_lag_is_bonded(ldev))) + if (!(ldev && __mlx5_lag_is_roce(ldev))) goto unlock; if (ldev->tracker.tx_type == NETDEV_LAG_TX_TYPE_ACTIVEBACKUP) { @@ -638,7 +737,7 @@ bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv) return true; ldev = mlx5_lag_dev_get(dev); - if (!ldev || !mlx5_lag_is_bonded(ldev) || ldev->pf[0].dev == dev) + if (!ldev || !__mlx5_lag_is_roce(ldev) || ldev->pf[0].dev == dev) return true; /* If bonded, we do not add an IB device for PF1. */ @@ -665,7 +764,7 @@ int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, mutex_lock(&lag_mutex); ldev = mlx5_lag_dev_get(dev); - if (ldev && mlx5_lag_is_bonded(ldev)) { + if (ldev && __mlx5_lag_is_roce(ldev)) { num_ports = MLX5_MAX_PORTS; mdev[0] = ldev->pf[0].dev; mdev[1] = ldev->pf[1].dev; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 0d90b1b4a3d3..ca0ee9916e9e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -33,6 +33,7 @@ #include <linux/clocksource.h> #include <linux/highmem.h> #include <rdma/mlx5-abi.h> +#include "lib/eq.h" #include "en.h" #include "clock.h" @@ -71,7 +72,7 @@ static u64 read_internal_timer(const struct cyclecounter *cc) struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, clock); - return mlx5_read_internal_timer(mdev) & cc->mask; + return mlx5_read_internal_timer(mdev, NULL) & cc->mask; } static void mlx5_update_clock_info_page(struct mlx5_core_dev *mdev) @@ -155,15 +156,19 @@ static int mlx5_ptp_settime(struct ptp_clock_info *ptp, return 0; } -static int mlx5_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts) +static int mlx5_ptp_gettimex(struct ptp_clock_info *ptp, struct timespec64 *ts, + struct ptp_system_timestamp *sts) { struct mlx5_clock *clock = container_of(ptp, struct mlx5_clock, ptp_info); - u64 ns; + struct mlx5_core_dev *mdev = container_of(clock, struct mlx5_core_dev, + clock); unsigned long flags; + u64 cycles, ns; write_seqlock_irqsave(&clock->lock, flags); - ns = timecounter_read(&clock->tc); + cycles = mlx5_read_internal_timer(mdev, sts); + ns = timecounter_cyc2time(&clock->tc, cycles); write_sequnlock_irqrestore(&clock->lock, flags); *ts = ns_to_timespec64(ns); @@ -306,7 +311,7 @@ static int mlx5_perout_configure(struct ptp_clock_info *ptp, ts.tv_sec = 
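With the lag.c rework above, the single bonded flag splits into RoCE and SR-IOV (VF) LAG modes, and the old allow/forbid admin state gives way to mlx5_lag_update() plus per-mode predicates. A hedged sketch of how a consumer might use the newly exported queries; my_describe_lag is a hypothetical caller:

#include <linux/mlx5/driver.h>
#include "mlx5_core.h"

/* Hypothetical caller of the LAG predicates exported above. */
static void my_describe_lag(struct mlx5_core_dev *dev)
{
	if (!mlx5_lag_is_active(dev))
		return;		/* neither LAG mode is configured */

	if (mlx5_lag_is_roce(dev))
		mlx5_core_info(dev, "RoCE LAG: one IB device serves both ports\n");
	else if (mlx5_lag_is_sriov(dev))
		mlx5_core_info(dev, "VF LAG: SR-IOV is active on at least one PF\n");
}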
rq->perout.start.sec; ts.tv_nsec = rq->perout.start.nsec; ns = timespec64_to_ns(&ts); - cycles_now = mlx5_read_internal_timer(mdev); + cycles_now = mlx5_read_internal_timer(mdev, NULL); write_seqlock_irqsave(&clock->lock, flags); nsec_now = timecounter_cyc2time(&clock->tc, cycles_now); nsec_delta = ns - nsec_now; @@ -383,7 +388,7 @@ static const struct ptp_clock_info mlx5_ptp_clock_info = { .pps = 0, .adjfreq = mlx5_ptp_adjfreq, .adjtime = mlx5_ptp_adjtime, - .gettime64 = mlx5_ptp_gettime, + .gettimex64 = mlx5_ptp_gettimex, .settime64 = mlx5_ptp_settime, .enable = NULL, .verify = NULL, @@ -439,16 +444,17 @@ static void mlx5_get_pps_caps(struct mlx5_core_dev *mdev) clock->pps_info.pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode); } -void mlx5_pps_event(struct mlx5_core_dev *mdev, - struct mlx5_eqe *eqe) +static int mlx5_pps_event(struct notifier_block *nb, + unsigned long type, void *data) { - struct mlx5_clock *clock = &mdev->clock; + struct mlx5_clock *clock = mlx5_nb_cof(nb, struct mlx5_clock, pps_nb); + struct mlx5_core_dev *mdev = clock->mdev; struct ptp_clock_event ptp_event; - struct timespec64 ts; - u64 nsec_now, nsec_delta; u64 cycles_now, cycles_delta; + u64 nsec_now, nsec_delta, ns; + struct mlx5_eqe *eqe = data; int pin = eqe->data.pps.pin; - s64 ns; + struct timespec64 ts; unsigned long flags; switch (clock->ptp_info.pin_config[pin].func) { @@ -463,11 +469,12 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, } else { ptp_event.type = PTP_CLOCK_EXTTS; } + /* TODO: clock->ptp can be NULL if ptp_clock_register fails */ ptp_clock_event(clock->ptp, &ptp_event); break; case PTP_PF_PEROUT: - mlx5_ptp_gettime(&clock->ptp_info, &ts); - cycles_now = mlx5_read_internal_timer(mdev); + mlx5_ptp_gettimex(&clock->ptp_info, &ts, NULL); + cycles_now = mlx5_read_internal_timer(mdev, NULL); ts.tv_sec += 1; ts.tv_nsec = 0; ns = timespec64_to_ns(&ts); @@ -481,8 +488,11 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev, write_sequnlock_irqrestore(&clock->lock, flags); break; default: - mlx5_core_err(mdev, " Unhandled event\n"); + mlx5_core_err(mdev, " Unhandled clock PPS event, func %d\n", + clock->ptp_info.pin_config[pin].func); } + + return NOTIFY_OK; } void mlx5_init_clock(struct mlx5_core_dev *mdev) @@ -511,14 +521,14 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) ktime_to_ns(ktime_get_real())); /* Calculate period in seconds to call the overflow watchdog - to make - * sure counter is checked at least once every wrap around. + * sure counter is checked at least twice every wrap around. * The period is calculated as the minimum between max HW cycles count * (The clock source mask) and max amount of cycles that can be * multiplied by clock multiplier where the result doesn't exceed * 64bits.
*/ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult); - overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1); + overflow_cycles = min(overflow_cycles, div_u64(clock->cycles.mask, 3)); ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles, frac, &frac); @@ -567,6 +577,9 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev) PTR_ERR(clock->ptp)); clock->ptp = NULL; } + + MLX5_NB_INIT(&clock->pps_nb, mlx5_pps_event, PPS_EVENT); + mlx5_eq_notifier_register(mdev, &clock->pps_nb); } void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) @@ -576,6 +589,7 @@ void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) if (!MLX5_CAP_GEN(mdev, device_frequency_khz)) return; + mlx5_eq_notifier_unregister(mdev, &clock->pps_nb); if (clock->ptp) { ptp_clock_unregister(clock->ptp); clock->ptp = NULL; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h index 263cb6e2aeee..31600924bdc3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.h @@ -36,7 +36,6 @@ #if IS_ENABLED(CONFIG_PTP_1588_CLOCK) void mlx5_init_clock(struct mlx5_core_dev *mdev); void mlx5_cleanup_clock(struct mlx5_core_dev *mdev); -void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) { @@ -60,8 +59,6 @@ static inline ktime_t mlx5_timecounter_cyc2time(struct mlx5_clock *clock, #else static inline void mlx5_init_clock(struct mlx5_core_dev *mdev) {} static inline void mlx5_cleanup_clock(struct mlx5_core_dev *mdev) {} -static inline void mlx5_pps_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) {} - static inline int mlx5_clock_get_ptp_index(struct mlx5_core_dev *mdev) { return -1; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c new file mode 100644 index 000000000000..bced2efe9bef --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c @@ -0,0 +1,255 @@ +// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB +/* Copyright (c) 2018 Mellanox Technologies */ + +#include <linux/mlx5/vport.h> +#include "lib/devcom.h" + +static LIST_HEAD(devcom_list); + +#define devcom_for_each_component(priv, comp, iter) \ + for (iter = 0; \ + comp = &(priv)->components[iter], iter < MLX5_DEVCOM_NUM_COMPONENTS; \ + iter++) + +struct mlx5_devcom_component { + struct { + void *data; + } device[MLX5_MAX_PORTS]; + + mlx5_devcom_event_handler_t handler; + struct rw_semaphore sem; + bool paired; +}; + +struct mlx5_devcom_list { + struct list_head list; + + struct mlx5_devcom_component components[MLX5_DEVCOM_NUM_COMPONENTS]; + struct mlx5_core_dev *devs[MLX5_MAX_PORTS]; +}; + +struct mlx5_devcom { + struct mlx5_devcom_list *priv; + int idx; +}; + +static struct mlx5_devcom_list *mlx5_devcom_list_alloc(void) +{ + struct mlx5_devcom_component *comp; + struct mlx5_devcom_list *priv; + int i; + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) + return NULL; + + devcom_for_each_component(priv, comp, i) + init_rwsem(&comp->sem); + + return priv; +} + +static struct mlx5_devcom *mlx5_devcom_alloc(struct mlx5_devcom_list *priv, + u8 idx) +{ + struct mlx5_devcom *devcom; + + devcom = kzalloc(sizeof(*devcom), GFP_KERNEL); + if (!devcom) + return NULL; + + devcom->priv = priv; + devcom->idx = idx; + return devcom; +} + +/* Must be called with intf_mutex held */ +struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev) +{ + struct 
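The mlx5_init_clock() hunk above shrinks the overflow-watchdog window from half the counter mask to a third, so the timecounter is refreshed at least twice per hardware wrap. Restated as a standalone sketch, with the bound worked through in comments:

#include <linux/kernel.h>
#include <linux/math64.h>

/* The overflow bound computed above, restated for illustration. */
static u64 overflow_cycles_bound(u64 mask, u32 mult)
{
	/* Largest cycle count whose cyc2ns product still fits in 63 bits. */
	u64 cycles = div64_u64(~0ULL >> 1, mult);

	/*
	 * Cap at a third (not half) of the counter width: e.g. for a
	 * 48-bit counter, mask / 3 leaves the watchdog two full check
	 * periods before the counter can wrap undetected.
	 */
	return min(cycles, div_u64(mask, 3));
}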
mlx5_devcom_list *priv = NULL, *iter; + struct mlx5_devcom *devcom = NULL; + bool new_priv = false; + u64 sguid0, sguid1; + int idx, i; + + if (!mlx5_core_is_pf(dev)) + return NULL; + + sguid0 = mlx5_query_nic_system_image_guid(dev); + list_for_each_entry(iter, &devcom_list, list) { + struct mlx5_core_dev *tmp_dev = NULL; + + idx = -1; + for (i = 0; i < MLX5_MAX_PORTS; i++) { + if (iter->devs[i]) + tmp_dev = iter->devs[i]; + else + idx = i; + } + + if (idx == -1) + continue; + + sguid1 = mlx5_query_nic_system_image_guid(tmp_dev); + if (sguid0 != sguid1) + continue; + + priv = iter; + break; + } + + if (!priv) { + priv = mlx5_devcom_list_alloc(); + if (!priv) + return ERR_PTR(-ENOMEM); + + idx = 0; + new_priv = true; + } + + priv->devs[idx] = dev; + devcom = mlx5_devcom_alloc(priv, idx); + if (!devcom) { + kfree(priv); + return ERR_PTR(-ENOMEM); + } + + if (new_priv) + list_add(&priv->list, &devcom_list); + + return devcom; +} + +/* Must be called with intf_mutex held */ +void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom) +{ + struct mlx5_devcom_list *priv; + int i; + + if (IS_ERR_OR_NULL(devcom)) + return; + + priv = devcom->priv; + priv->devs[devcom->idx] = NULL; + + kfree(devcom); + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (priv->devs[i]) + break; + + if (i != MLX5_MAX_PORTS) + return; + + list_del(&priv->list); + kfree(priv); +} + +void mlx5_devcom_register_component(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id, + mlx5_devcom_event_handler_t handler, + void *data) +{ + struct mlx5_devcom_component *comp; + + if (IS_ERR_OR_NULL(devcom)) + return; + + WARN_ON(!data); + + comp = &devcom->priv->components[id]; + down_write(&comp->sem); + comp->handler = handler; + comp->device[devcom->idx].data = data; + up_write(&comp->sem); +} + +void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id) +{ + struct mlx5_devcom_component *comp; + + if (IS_ERR_OR_NULL(devcom)) + return; + + comp = &devcom->priv->components[id]; + down_write(&comp->sem); + comp->device[devcom->idx].data = NULL; + up_write(&comp->sem); +} + +int mlx5_devcom_send_event(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id, + int event, + void *event_data) +{ + struct mlx5_devcom_component *comp; + int err = -ENODEV, i; + + if (IS_ERR_OR_NULL(devcom)) + return err; + + comp = &devcom->priv->components[id]; + down_write(&comp->sem); + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (i != devcom->idx && comp->device[i].data) { + err = comp->handler(event, comp->device[i].data, + event_data); + break; + } + + up_write(&comp->sem); + return err; +} + +void mlx5_devcom_set_paired(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id, + bool paired) +{ + struct mlx5_devcom_component *comp; + + comp = &devcom->priv->components[id]; + WARN_ON(!rwsem_is_locked(&comp->sem)); + + comp->paired = paired; +} + +bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id) +{ + if (IS_ERR_OR_NULL(devcom)) + return false; + + return devcom->priv->components[id].paired; +} + +void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id) +{ + struct mlx5_devcom_component *comp; + int i; + + if (IS_ERR_OR_NULL(devcom)) + return NULL; + + comp = &devcom->priv->components[id]; + down_read(&comp->sem); + if (!comp->paired) { + up_read(&comp->sem); + return NULL; + } + + for (i = 0; i < MLX5_MAX_PORTS; i++) + if (i != devcom->idx) + break; + + return comp->device[i].data; +} + +void 
mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id) +{ + struct mlx5_devcom_component *comp = &devcom->priv->components[id]; + + up_read(&comp->sem); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h new file mode 100644 index 000000000000..939d5bf1581b --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.h @@ -0,0 +1,46 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies */ + +#ifndef __LIB_MLX5_DEVCOM_H__ +#define __LIB_MLX5_DEVCOM_H__ + +#include <linux/mlx5/driver.h> + +enum mlx5_devcom_components { + MLX5_DEVCOM_ESW_OFFLOADS, + + MLX5_DEVCOM_NUM_COMPONENTS, +}; + +typedef int (*mlx5_devcom_event_handler_t)(int event, + void *my_data, + void *event_data); + +struct mlx5_devcom *mlx5_devcom_register_device(struct mlx5_core_dev *dev); +void mlx5_devcom_unregister_device(struct mlx5_devcom *devcom); + +void mlx5_devcom_register_component(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id, + mlx5_devcom_event_handler_t handler, + void *data); +void mlx5_devcom_unregister_component(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id); + +int mlx5_devcom_send_event(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id, + int event, + void *event_data); + +void mlx5_devcom_set_paired(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id, + bool paired); +bool mlx5_devcom_is_paired(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id); + +void *mlx5_devcom_get_peer_data(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id); +void mlx5_devcom_release_peer_data(struct mlx5_devcom *devcom, + enum mlx5_devcom_components id); + +#endif + diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h new file mode 100644 index 000000000000..c0fb6d72b695 --- /dev/null +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/eq.h @@ -0,0 +1,98 @@ +/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */ +/* Copyright (c) 2018 Mellanox Technologies */ + +#ifndef __LIB_MLX5_EQ_H__ +#define __LIB_MLX5_EQ_H__ +#include <linux/mlx5/driver.h> +#include <linux/mlx5/eq.h> +#include <linux/mlx5/cq.h> + +#define MLX5_MAX_IRQ_NAME (32) +#define MLX5_EQE_SIZE (sizeof(struct mlx5_eqe)) + +struct mlx5_eq_tasklet { + struct list_head list; + struct list_head process_list; + struct tasklet_struct task; + spinlock_t lock; /* lock completion tasklet list */ +}; + +struct mlx5_cq_table { + spinlock_t lock; /* protect radix tree */ + struct radix_tree_root tree; +}; + +struct mlx5_eq { + struct mlx5_core_dev *dev; + struct mlx5_cq_table cq_table; + __be32 __iomem *doorbell; + u32 cons_index; + struct mlx5_frag_buf buf; + int size; + unsigned int vecidx; + unsigned int irqn; + u8 eqn; + int nent; + struct mlx5_rsc_debug *dbg; +}; + +struct mlx5_eq_comp { + struct mlx5_eq core; /* Must be first */ + struct mlx5_eq_tasklet tasklet_ctx; + struct list_head list; +}; + +static inline struct mlx5_eqe *get_eqe(struct mlx5_eq *eq, u32 entry) +{ + return mlx5_buf_offset(&eq->buf, entry * MLX5_EQE_SIZE); +} + +static inline struct mlx5_eqe *next_eqe_sw(struct mlx5_eq *eq) +{ + struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); + + return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; +} + +static inline void eq_update_ci(struct mlx5_eq *eq, int arm) +{ + __be32 __iomem *addr = eq->doorbell + (arm ? 
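The new devcom interface above (lib/devcom.c, lib/devcom.h) matches the two PFs of one adapter by system image GUID and lets a component share state across them. A hedged usage sketch; the component logic and names (my_pair_event, my_data) are illustrative, the calls are the API declared above:

#include "lib/devcom.h"

/* Illustrative handler: invoked with the *peer's* registered data. */
static int my_pair_event(int event, void *my_data, void *event_data)
{
	/* react to the peer's notification */
	return 0;
}

static void my_component_start(struct mlx5_devcom *devcom, void *my_data)
{
	mlx5_devcom_register_component(devcom, MLX5_DEVCOM_ESW_OFFLOADS,
				       my_pair_event, my_data);

	/* Notify the peer, if one is registered; -ENODEV when alone. */
	mlx5_devcom_send_event(devcom, MLX5_DEVCOM_ESW_OFFLOADS, 0, my_data);
}

static void my_use_peer(struct mlx5_devcom *devcom)
{
	/* Read-locks the component; NULL until both sides are paired. */
	void *peer = mlx5_devcom_get_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);

	if (!peer)
		return;
	/* ... use the peer's data ... */
	mlx5_devcom_release_peer_data(devcom, MLX5_DEVCOM_ESW_OFFLOADS);
}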
0 : 2); + u32 val = (eq->cons_index & 0xffffff) | (eq->eqn << 24); + + __raw_writel((__force u32)cpu_to_be32(val), addr); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +int mlx5_eq_table_init(struct mlx5_core_dev *dev); +void mlx5_eq_table_cleanup(struct mlx5_core_dev *dev); +int mlx5_eq_table_create(struct mlx5_core_dev *dev); +void mlx5_eq_table_destroy(struct mlx5_core_dev *dev); + +int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); +int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); +struct mlx5_eq_comp *mlx5_eqn2comp_eq(struct mlx5_core_dev *dev, int eqn); +struct mlx5_eq *mlx5_get_async_eq(struct mlx5_core_dev *dev); +void mlx5_cq_tasklet_cb(unsigned long data); +struct cpumask *mlx5_eq_comp_cpumask(struct mlx5_core_dev *dev, int ix); + +u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq_comp *eq); +void mlx5_eq_synchronize_async_irq(struct mlx5_core_dev *dev); +void mlx5_eq_synchronize_cmd_irq(struct mlx5_core_dev *dev); + +int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); +int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); +void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); + +/* This function should only be called after mlx5_cmd_force_teardown_hca */ +void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev); + +#ifdef CONFIG_RFS_ACCEL +struct cpu_rmap *mlx5_eq_table_get_rmap(struct mlx5_core_dev *dev); +#endif + +int mlx5_eq_notifier_register(struct mlx5_core_dev *dev, struct mlx5_nb *nb); +int mlx5_eq_notifier_unregister(struct mlx5_core_dev *dev, struct mlx5_nb *nb); + +#endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h index 7550b1cc8c6a..397a2847867a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mlx5.h @@ -33,6 +33,8 @@ #ifndef __LIB_MLX5_H__ #define __LIB_MLX5_H__ +#include "mlx5_core.h" + void mlx5_init_reserved_gids(struct mlx5_core_dev *dev); void mlx5_cleanup_reserved_gids(struct mlx5_core_dev *dev); int mlx5_core_reserve_gids(struct mlx5_core_dev *dev, unsigned int count); @@ -40,4 +42,38 @@ void mlx5_core_unreserve_gids(struct mlx5_core_dev *dev, unsigned int count); int mlx5_core_reserved_gid_alloc(struct mlx5_core_dev *dev, int *gid_index); void mlx5_core_reserved_gid_free(struct mlx5_core_dev *dev, int gid_index); +/* TODO move to lib/events.h */ + +#define PORT_MODULE_EVENT_MODULE_STATUS_MASK 0xF +#define PORT_MODULE_EVENT_ERROR_TYPE_MASK 0xF + +enum port_module_event_status_type { + MLX5_MODULE_STATUS_PLUGGED = 0x1, + MLX5_MODULE_STATUS_UNPLUGGED = 0x2, + MLX5_MODULE_STATUS_ERROR = 0x3, + MLX5_MODULE_STATUS_DISABLED = 0x4, + MLX5_MODULE_STATUS_NUM, +}; + +enum port_module_event_error_type { + MLX5_MODULE_EVENT_ERROR_POWER_BUDGET_EXCEEDED = 0x0, + MLX5_MODULE_EVENT_ERROR_LONG_RANGE_FOR_NON_MLNX = 0x1, + MLX5_MODULE_EVENT_ERROR_BUS_STUCK = 0x2, + MLX5_MODULE_EVENT_ERROR_NO_EEPROM_RETRY_TIMEOUT = 0x3, + MLX5_MODULE_EVENT_ERROR_ENFORCE_PART_NUMBER_LIST = 0x4, + MLX5_MODULE_EVENT_ERROR_UNKNOWN_IDENTIFIER = 0x5, + MLX5_MODULE_EVENT_ERROR_HIGH_TEMPERATURE = 0x6, + MLX5_MODULE_EVENT_ERROR_BAD_CABLE = 0x7, + MLX5_MODULE_EVENT_ERROR_PCIE_POWER_SLOT_EXCEEDED = 0xc, + MLX5_MODULE_EVENT_ERROR_NUM, +}; + +struct mlx5_pme_stats { + u64 status_counters[MLX5_MODULE_STATUS_NUM]; + u64 error_counters[MLX5_MODULE_EVENT_ERROR_NUM]; +}; + +void mlx5_get_pme_stats(struct mlx5_core_dev *dev, 
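The next_eqe_sw() helper in lib/eq.h above encodes the EQ ownership protocol: hardware flips the owner bit it writes on each pass over the ring, so software detects fresh entries without a producer index. A worked restatement, assuming a ring of nent = 256 entries:

#include <linux/types.h>

/*
 * Validity test from next_eqe_sw() above, for nent = 256:
 *
 *   pass 0 (cons_index   0..255): (cons_index & nent) == 0, HW wrote owner=0
 *   pass 1 (cons_index 256..511): (cons_index & nent) != 0, HW wrote owner=1
 *
 * An entry is fresh when its owner bit matches the current pass parity,
 * i.e. when (owner ^ parity) is zero.
 */
static bool eqe_is_fresh(u8 owner, u32 cons_index, int nent)
{
	return !((owner & 1) ^ !!(cons_index & nent));
}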
struct mlx5_pme_stats *stats); +int mlx5_notifier_call_chain(struct mlx5_events *events, unsigned int event, void *data); + #endif diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 28132c7dc05f..77896c11f6f3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -43,7 +43,6 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/cq.h> #include <linux/mlx5/qp.h> -#include <linux/mlx5/srq.h> #include <linux/debugfs.h> #include <linux/kmod.h> #include <linux/mlx5/mlx5_ifc.h> @@ -53,6 +52,7 @@ #endif #include <net/devlink.h> #include "mlx5_core.h" +#include "lib/eq.h" #include "fs_core.h" #include "lib/mpfs.h" #include "eswitch.h" @@ -63,6 +63,7 @@ #include "accel/tls.h" #include "lib/clock.h" #include "lib/vxlan.h" +#include "lib/devcom.h" #include "diag/fw_tracer.h" MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>"); @@ -319,51 +320,6 @@ static void release_bar(struct pci_dev *pdev) pci_release_regions(pdev); } -static int mlx5_alloc_irq_vectors(struct mlx5_core_dev *dev) -{ - struct mlx5_priv *priv = &dev->priv; - struct mlx5_eq_table *table = &priv->eq_table; - int num_eqs = MLX5_CAP_GEN(dev, max_num_eqs) ? - MLX5_CAP_GEN(dev, max_num_eqs) : - 1 << MLX5_CAP_GEN(dev, log_max_eq); - int nvec; - int err; - - nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() + - MLX5_EQ_VEC_COMP_BASE; - nvec = min_t(int, nvec, num_eqs); - if (nvec <= MLX5_EQ_VEC_COMP_BASE) - return -ENOMEM; - - priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL); - if (!priv->irq_info) - return -ENOMEM; - - nvec = pci_alloc_irq_vectors(dev->pdev, - MLX5_EQ_VEC_COMP_BASE + 1, nvec, - PCI_IRQ_MSIX); - if (nvec < 0) { - err = nvec; - goto err_free_irq_info; - } - - table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE; - - return 0; - -err_free_irq_info: - kfree(priv->irq_info); - return err; -} - -static void mlx5_free_irq_vectors(struct mlx5_core_dev *dev) -{ - struct mlx5_priv *priv = &dev->priv; - - pci_free_irq_vectors(dev->pdev); - kfree(priv->irq_info); -} - struct mlx5_reg_host_endianness { u8 he; u8 rsvd[15]; @@ -624,188 +580,24 @@ int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id) return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); } -u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev) +u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev, + struct ptp_system_timestamp *sts) { u32 timer_h, timer_h1, timer_l; timer_h = ioread32be(&dev->iseg->internal_timer_h); + ptp_read_system_prets(sts); timer_l = ioread32be(&dev->iseg->internal_timer_l); + ptp_read_system_postts(sts); timer_h1 = ioread32be(&dev->iseg->internal_timer_h); - if (timer_h != timer_h1) /* wrap around */ + if (timer_h != timer_h1) { + /* wrap around */ + ptp_read_system_prets(sts); timer_l = ioread32be(&dev->iseg->internal_timer_l); - - return (u64)timer_l | (u64)timer_h1 << 32; -} - -static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i) -{ - struct mlx5_priv *priv = &mdev->priv; - int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); - - if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) { - mlx5_core_warn(mdev, "zalloc_cpumask_var failed"); - return -ENOMEM; + ptp_read_system_postts(sts); } - cpumask_set_cpu(cpumask_local_spread(i, priv->numa_node), - priv->irq_info[i].mask); - - if (IS_ENABLED(CONFIG_SMP) && - irq_set_affinity_hint(irq, priv->irq_info[i].mask)) - mlx5_core_warn(mdev, "irq_set_affinity_hint failed, irq 0x%.4x", irq); - - return 
0; -} - -static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i) -{ - struct mlx5_priv *priv = &mdev->priv; - int irq = pci_irq_vector(mdev->pdev, MLX5_EQ_VEC_COMP_BASE + i); - - irq_set_affinity_hint(irq, NULL); - free_cpumask_var(priv->irq_info[i].mask); -} - -static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev) -{ - int err; - int i; - - for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) { - err = mlx5_irq_set_affinity_hint(mdev, i); - if (err) - goto err_out; - } - - return 0; - -err_out: - for (i--; i >= 0; i--) - mlx5_irq_clear_affinity_hint(mdev, i); - - return err; -} - -static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev) -{ - int i; - - for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) - mlx5_irq_clear_affinity_hint(mdev, i); -} - -int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn, - unsigned int *irqn) -{ - struct mlx5_eq_table *table = &dev->priv.eq_table; - struct mlx5_eq *eq, *n; - int err = -ENOENT; - - spin_lock(&table->lock); - list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { - if (eq->index == vector) { - *eqn = eq->eqn; - *irqn = eq->irqn; - err = 0; - break; - } - } - spin_unlock(&table->lock); - - return err; -} -EXPORT_SYMBOL(mlx5_vector2eqn); - -struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn) -{ - struct mlx5_eq_table *table = &dev->priv.eq_table; - struct mlx5_eq *eq; - - spin_lock(&table->lock); - list_for_each_entry(eq, &table->comp_eqs_list, list) - if (eq->eqn == eqn) { - spin_unlock(&table->lock); - return eq; - } - - spin_unlock(&table->lock); - - return ERR_PTR(-ENOENT); -} - -static void free_comp_eqs(struct mlx5_core_dev *dev) -{ - struct mlx5_eq_table *table = &dev->priv.eq_table; - struct mlx5_eq *eq, *n; - -#ifdef CONFIG_RFS_ACCEL - if (dev->rmap) { - free_irq_cpu_rmap(dev->rmap); - dev->rmap = NULL; - } -#endif - spin_lock(&table->lock); - list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) { - list_del(&eq->list); - spin_unlock(&table->lock); - if (mlx5_destroy_unmap_eq(dev, eq)) - mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n", - eq->eqn); - kfree(eq); - spin_lock(&table->lock); - } - spin_unlock(&table->lock); -} - -static int alloc_comp_eqs(struct mlx5_core_dev *dev) -{ - struct mlx5_eq_table *table = &dev->priv.eq_table; - char name[MLX5_MAX_IRQ_NAME]; - struct mlx5_eq *eq; - int ncomp_vec; - int nent; - int err; - int i; - - INIT_LIST_HEAD(&table->comp_eqs_list); - ncomp_vec = table->num_comp_vectors; - nent = MLX5_COMP_EQ_SIZE; -#ifdef CONFIG_RFS_ACCEL - dev->rmap = alloc_irq_cpu_rmap(ncomp_vec); - if (!dev->rmap) - return -ENOMEM; -#endif - for (i = 0; i < ncomp_vec; i++) { - eq = kzalloc(sizeof(*eq), GFP_KERNEL); - if (!eq) { - err = -ENOMEM; - goto clean; - } - -#ifdef CONFIG_RFS_ACCEL - irq_cpu_rmap_add(dev->rmap, pci_irq_vector(dev->pdev, - MLX5_EQ_VEC_COMP_BASE + i)); -#endif - snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i); - err = mlx5_create_map_eq(dev, eq, - i + MLX5_EQ_VEC_COMP_BASE, nent, 0, - name, MLX5_EQ_TYPE_COMP); - if (err) { - kfree(eq); - goto clean; - } - mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn); - eq->index = i; - spin_lock(&table->lock); - list_add_tail(&eq->list, &table->comp_eqs_list); - spin_unlock(&table->lock); - } - - return 0; - -clean: - free_comp_eqs(dev); - return err; + return (u64)timer_l | (u64)timer_h1 << 32; } static int mlx5_core_set_issi(struct mlx5_core_dev *dev) @@ -938,28 +730,37 @@ static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv 
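The mlx5_read_internal_timer() change above threads a ptp_system_timestamp through the read, but the core sequence is unchanged: read high, low, then high again, guarding against the low word wrapping between the two reads. A standalone sketch of that guard (the register pair layout is assumed):

#include <linux/io.h>
#include <linux/types.h>

/* Torn-read guard for a 64-bit counter split across two registers. */
static u64 read_split_counter(void __iomem *hi, void __iomem *lo)
{
	u32 h = ioread32be(hi);
	u32 l = ioread32be(lo);
	u32 h2 = ioread32be(hi);

	if (h != h2)			/* low word wrapped between reads */
		l = ioread32be(lo);	/* re-read so it pairs with h2 */

	return (u64)l | (u64)h2 << 32;
}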
*priv) struct pci_dev *pdev = dev->pdev; int err; + priv->devcom = mlx5_devcom_register_device(dev); + if (IS_ERR(priv->devcom)) + dev_err(&pdev->dev, "failed to register with devcom (0x%p)\n", + priv->devcom); + err = mlx5_query_board_id(dev); if (err) { dev_err(&pdev->dev, "query board id failed\n"); - goto out; + goto err_devcom; } - err = mlx5_eq_init(dev); + err = mlx5_eq_table_init(dev); if (err) { dev_err(&pdev->dev, "failed to initialize eq\n"); - goto out; + goto err_devcom; + } + + err = mlx5_events_init(dev); + if (err) { + dev_err(&pdev->dev, "failed to initialize events\n"); + goto err_eq_cleanup; } err = mlx5_cq_debugfs_init(dev); if (err) { dev_err(&pdev->dev, "failed to initialize cq debugfs\n"); - goto err_eq_cleanup; + goto err_events_cleanup; } mlx5_init_qp_table(dev); - mlx5_init_srq_table(dev); - mlx5_init_mkey_table(dev); mlx5_init_reserved_gids(dev); @@ -1013,14 +814,15 @@ err_rl_cleanup: err_tables_cleanup: mlx5_vxlan_destroy(dev->vxlan); mlx5_cleanup_mkey_table(dev); - mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); - +err_events_cleanup: + mlx5_events_cleanup(dev); err_eq_cleanup: - mlx5_eq_cleanup(dev); + mlx5_eq_table_cleanup(dev); +err_devcom: + mlx5_devcom_unregister_device(dev->priv.devcom); -out: return err; } @@ -1036,10 +838,11 @@ static void mlx5_cleanup_once(struct mlx5_core_dev *dev) mlx5_cleanup_clock(dev); mlx5_cleanup_reserved_gids(dev); mlx5_cleanup_mkey_table(dev); - mlx5_cleanup_srq_table(dev); mlx5_cleanup_qp_table(dev); mlx5_cq_debugfs_cleanup(dev); - mlx5_eq_cleanup(dev); + mlx5_events_cleanup(dev); + mlx5_eq_table_cleanup(dev); + mlx5_devcom_unregister_device(dev->priv.devcom); } static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, @@ -1131,16 +934,10 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto reclaim_boot_pages; } - err = mlx5_pagealloc_start(dev); - if (err) { - dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n"); - goto reclaim_boot_pages; - } - err = mlx5_cmd_init_hca(dev, sw_owner_id); if (err) { dev_err(&pdev->dev, "init hca failed\n"); - goto err_pagealloc_stop; + goto reclaim_boot_pages; } mlx5_set_driver_version(dev); @@ -1161,23 +958,20 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, } } - err = mlx5_alloc_irq_vectors(dev); - if (err) { - dev_err(&pdev->dev, "alloc irq vectors failed\n"); - goto err_cleanup_once; - } - dev->priv.uar = mlx5_get_uars_page(dev); if (IS_ERR(dev->priv.uar)) { dev_err(&pdev->dev, "Failed allocating uar, aborting\n"); err = PTR_ERR(dev->priv.uar); - goto err_disable_msix; + goto err_get_uars; } - err = mlx5_start_eqs(dev); + mlx5_events_start(dev); + mlx5_pagealloc_start(dev); + + err = mlx5_eq_table_create(dev); if (err) { - dev_err(&pdev->dev, "Failed to start pages and async EQs\n"); - goto err_put_uars; + dev_err(&pdev->dev, "Failed to create EQs\n"); + goto err_eq_table; } err = mlx5_fw_tracer_init(dev->tracer); @@ -1186,18 +980,6 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, goto err_fw_tracer; } - err = alloc_comp_eqs(dev); - if (err) { - dev_err(&pdev->dev, "Failed to alloc completion EQs\n"); - goto err_comp_eqs; - } - - err = mlx5_irq_set_affinity_hints(dev); - if (err) { - dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n"); - goto err_affinity_hints; - } - err = mlx5_fpga_device_start(dev); if (err) { dev_err(&pdev->dev, "fpga device start failed %d\n", err); @@ -1266,24 +1048,17 @@ err_ipsec_start: 
mlx5_fpga_device_stop(dev); err_fpga_start: - mlx5_irq_clear_affinity_hints(dev); - -err_affinity_hints: - free_comp_eqs(dev); - -err_comp_eqs: mlx5_fw_tracer_cleanup(dev->tracer); err_fw_tracer: - mlx5_stop_eqs(dev); + mlx5_eq_table_destroy(dev); -err_put_uars: +err_eq_table: + mlx5_pagealloc_stop(dev); + mlx5_events_stop(dev); mlx5_put_uars_page(dev, priv->uar); -err_disable_msix: - mlx5_free_irq_vectors(dev); - -err_cleanup_once: +err_get_uars: if (boot) mlx5_cleanup_once(dev); @@ -1294,9 +1069,6 @@ err_stop_poll: goto out_err; } -err_pagealloc_stop: - mlx5_pagealloc_stop(dev); - reclaim_boot_pages: mlx5_reclaim_startup_pages(dev); @@ -1340,21 +1112,20 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv, mlx5_accel_ipsec_cleanup(dev); mlx5_accel_tls_cleanup(dev); mlx5_fpga_device_stop(dev); - mlx5_irq_clear_affinity_hints(dev); - free_comp_eqs(dev); mlx5_fw_tracer_cleanup(dev->tracer); - mlx5_stop_eqs(dev); + mlx5_eq_table_destroy(dev); + mlx5_pagealloc_stop(dev); + mlx5_events_stop(dev); mlx5_put_uars_page(dev, priv->uar); - mlx5_free_irq_vectors(dev); if (cleanup) mlx5_cleanup_once(dev); mlx5_stop_health_poll(dev, cleanup); + err = mlx5_cmd_teardown_hca(dev); if (err) { dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n"); goto out; } - mlx5_pagealloc_stop(dev); mlx5_reclaim_startup_pages(dev); mlx5_core_disable_hca(dev, 0); mlx5_cmd_cleanup(dev); @@ -1364,12 +1135,6 @@ out: return err; } -struct mlx5_core_event_handler { - void (*event)(struct mlx5_core_dev *dev, - enum mlx5_dev_event event, - void *data); -}; - static const struct devlink_ops mlx5_devlink_ops = { #ifdef CONFIG_MLX5_ESWITCH .eswitch_mode_set = mlx5_devlink_eswitch_mode_set, @@ -1403,7 +1168,6 @@ static int init_one(struct pci_dev *pdev, pci_set_drvdata(pdev, dev); dev->pdev = pdev; - dev->event = mlx5_core_event; dev->profile = &profile[prof_sel]; INIT_LIST_HEAD(&priv->ctx_list); @@ -1411,17 +1175,6 @@ static int init_one(struct pci_dev *pdev, mutex_init(&dev->pci_status_mutex); mutex_init(&dev->intf_state_mutex); - INIT_LIST_HEAD(&priv->waiting_events_list); - priv->is_accum_events = false; - -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - err = init_srcu_struct(&priv->pfault_srcu); - if (err) { - dev_err(&pdev->dev, "init_srcu_struct failed with error code %d\n", - err); - goto clean_dev; - } -#endif mutex_init(&priv->bfregs.reg_head.lock); mutex_init(&priv->bfregs.wc_head.lock); INIT_LIST_HEAD(&priv->bfregs.reg_head.list); @@ -1430,7 +1183,7 @@ static int init_one(struct pci_dev *pdev, err = mlx5_pci_init(dev, priv); if (err) { dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err); - goto clean_srcu; + goto clean_dev; } err = mlx5_health_init(dev); @@ -1439,12 +1192,14 @@ static int init_one(struct pci_dev *pdev, goto close_pci; } - mlx5_pagealloc_init(dev); + err = mlx5_pagealloc_init(dev); + if (err) + goto err_pagealloc_init; err = mlx5_load_one(dev, priv, true); if (err) { dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err); - goto clean_health; + goto err_load_one; } request_module_nowait(MLX5_IB_MOD); @@ -1458,16 +1213,13 @@ static int init_one(struct pci_dev *pdev, clean_load: mlx5_unload_one(dev, priv, true); -clean_health: +err_load_one: mlx5_pagealloc_cleanup(dev); +err_pagealloc_init: mlx5_health_cleanup(dev); close_pci: mlx5_pci_close(dev, priv); -clean_srcu: -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - cleanup_srcu_struct(&priv->pfault_srcu); clean_dev: -#endif devlink_free(devlink); return err; @@ -1491,9 +1243,6 @@ static void 
remove_one(struct pci_dev *pdev) mlx5_pagealloc_cleanup(dev); mlx5_health_cleanup(dev); mlx5_pci_close(dev, priv); -#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING - cleanup_srcu_struct(&priv->pfault_srcu); -#endif devlink_free(devlink); } @@ -1637,7 +1386,6 @@ succeed: * kexec. There is no need to cleanup the mlx5_core software * contexts. */ - mlx5_irq_clear_affinity_hints(dev); mlx5_core_eq_free_irqs(dev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index 0594d0961cb3..c68dcea5985b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -38,6 +38,7 @@ #include <linux/sched.h> #include <linux/if_link.h> #include <linux/firmware.h> +#include <linux/ptp_clock_kernel.h> #include <linux/mlx5/cq.h> #include <linux/mlx5/fs.h> @@ -78,6 +79,11 @@ do { \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) +#define mlx5_core_warn_once(__dev, format, ...) \ + dev_warn_once(&(__dev)->pdev->dev, "%s:%d:(pid %d): " format, \ + __func__, __LINE__, current->pid, \ + ##__VA_ARGS__) + #define mlx5_core_info(__dev, format, ...) \ dev_info(&(__dev)->pdev->dev, format, ##__VA_ARGS__) @@ -97,12 +103,6 @@ int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, uint32_t *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); - -void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event, - unsigned long param); -void mlx5_core_page_fault(struct mlx5_core_dev *dev, - struct mlx5_pagefault *pfault); -void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe); void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force); void mlx5_disable_device(struct mlx5_core_dev *dev); void mlx5_recover_device(struct mlx5_core_dev *dev); @@ -122,30 +122,10 @@ int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id); int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev); -u64 mlx5_read_internal_timer(struct mlx5_core_dev *dev); - -int mlx5_eq_init(struct mlx5_core_dev *dev); -void mlx5_eq_cleanup(struct mlx5_core_dev *dev); -int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx, - int nent, u64 mask, const char *name, - enum mlx5_eq_type type); -int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_eq_add_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); -int mlx5_eq_del_cq(struct mlx5_eq *eq, struct mlx5_core_cq *cq); -int mlx5_core_eq_query(struct mlx5_core_dev *dev, struct mlx5_eq *eq, - u32 *out, int outlen); -int mlx5_start_eqs(struct mlx5_core_dev *dev); -void mlx5_stop_eqs(struct mlx5_core_dev *dev); -/* This function should only be called after mlx5_cmd_force_teardown_hca */ -void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev); -struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn); -u32 mlx5_eq_poll_irq_disabled(struct mlx5_eq *eq); -void mlx5_cq_tasklet_cb(unsigned long data); -void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced); -int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq); -int mlx5_eq_debugfs_init(struct mlx5_core_dev *dev); -void mlx5_eq_debugfs_cleanup(struct mlx5_core_dev *dev); +u64 
mlx5_read_internal_timer(struct mlx5_core_dev *dev, + struct ptp_system_timestamp *sts); + +void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev); int mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); @@ -159,6 +139,11 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove(struct mlx5_core_dev *dev); +int mlx5_events_init(struct mlx5_core_dev *dev); +void mlx5_events_cleanup(struct mlx5_core_dev *dev); +void mlx5_events_start(struct mlx5_core_dev *dev); +void mlx5_events_stop(struct mlx5_core_dev *dev); + void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv); void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv); void mlx5_attach_device(struct mlx5_core_dev *dev); @@ -202,10 +187,8 @@ static inline int mlx5_lag_is_lacp_owner(struct mlx5_core_dev *dev) MLX5_CAP_GEN(dev, lag_master); } -int mlx5_lag_allow(struct mlx5_core_dev *dev); -int mlx5_lag_forbid(struct mlx5_core_dev *dev); - void mlx5_reload_interface(struct mlx5_core_dev *mdev, int protocol); +void mlx5_lag_update(struct mlx5_core_dev *dev); enum { MLX5_NIC_IFC_FULL = 0, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index e36d3e3675f9..a83b517b0714 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -37,6 +37,7 @@ #include <linux/mlx5/driver.h> #include <linux/mlx5/cmd.h> #include "mlx5_core.h" +#include "lib/eq.h" enum { MLX5_PAGES_CANT_GIVE = 0, @@ -433,15 +434,28 @@ static void pages_work_handler(struct work_struct *work) kfree(req); } -void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, - s32 npages) +static int req_pages_handler(struct notifier_block *nb, + unsigned long type, void *data) { struct mlx5_pages_req *req; - + struct mlx5_core_dev *dev; + struct mlx5_priv *priv; + struct mlx5_eqe *eqe; + u16 func_id; + s32 npages; + + priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb); + dev = container_of(priv, struct mlx5_core_dev, priv); + eqe = data; + + func_id = be16_to_cpu(eqe->data.req_pages.func_id); + npages = be32_to_cpu(eqe->data.req_pages.num_pages); + mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n", + func_id, npages); req = kzalloc(sizeof(*req), GFP_ATOMIC); if (!req) { mlx5_core_warn(dev, "failed to allocate pages request\n"); - return; + return NOTIFY_DONE; } req->dev = dev; @@ -449,6 +463,7 @@ void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, req->npages = npages; INIT_WORK(&req->work, pages_work_handler); queue_work(dev->priv.pg_wq, &req->work); + return NOTIFY_OK; } int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot) @@ -524,29 +539,32 @@ int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev) return 0; } -void mlx5_pagealloc_init(struct mlx5_core_dev *dev) +int mlx5_pagealloc_init(struct mlx5_core_dev *dev) { dev->priv.page_root = RB_ROOT; INIT_LIST_HEAD(&dev->priv.free_list); + dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator"); + if (!dev->priv.pg_wq) + return -ENOMEM; + + return 0; } void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev) { - /* nothing */ + destroy_workqueue(dev->priv.pg_wq); } -int mlx5_pagealloc_start(struct mlx5_core_dev *dev) +void mlx5_pagealloc_start(struct mlx5_core_dev *dev) { - dev->priv.pg_wq = 
create_singlethread_workqueue("mlx5_page_allocator"); - if (!dev->priv.pg_wq) - return -ENOMEM; - - return 0; + MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST); + mlx5_eq_notifier_register(dev, &dev->priv.pg_nb); } void mlx5_pagealloc_stop(struct mlx5_core_dev *dev) { - destroy_workqueue(dev->priv.pg_wq); + mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb); + flush_workqueue(dev->priv.pg_wq); } int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 31a9cbd85689..2b82f35f4c35 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -915,63 +915,6 @@ void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, *enabled = !!(MLX5_GET(pcmr_reg, out, fcs_chk)); } -static const char *mlx5_pme_status[MLX5_MODULE_STATUS_NUM] = { - "Cable plugged", /* MLX5_MODULE_STATUS_PLUGGED = 0x1 */ - "Cable unplugged", /* MLX5_MODULE_STATUS_UNPLUGGED = 0x2 */ - "Cable error", /* MLX5_MODULE_STATUS_ERROR = 0x3 */ -}; - -static const char *mlx5_pme_error[MLX5_MODULE_EVENT_ERROR_NUM] = { - "Power budget exceeded", - "Long Range for non MLNX cable", - "Bus stuck(I2C or data shorted)", - "No EEPROM/retry timeout", - "Enforce part number list", - "Unknown identifier", - "High Temperature", - "Bad or shorted cable/module", - "Unknown status", -}; - -void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe) -{ - enum port_module_event_status_type module_status; - enum port_module_event_error_type error_type; - struct mlx5_eqe_port_module *module_event_eqe; - struct mlx5_priv *priv = &dev->priv; - u8 module_num; - - module_event_eqe = &eqe->data.port_module; - module_num = module_event_eqe->module; - module_status = module_event_eqe->module_status & - PORT_MODULE_EVENT_MODULE_STATUS_MASK; - error_type = module_event_eqe->error_type & - PORT_MODULE_EVENT_ERROR_TYPE_MASK; - - if (module_status < MLX5_MODULE_STATUS_ERROR) { - priv->pme_stats.status_counters[module_status - 1]++; - } else if (module_status == MLX5_MODULE_STATUS_ERROR) { - if (error_type >= MLX5_MODULE_EVENT_ERROR_UNKNOWN) - /* Unknown error type */ - error_type = MLX5_MODULE_EVENT_ERROR_UNKNOWN; - priv->pme_stats.error_counters[error_type]++; - } - - if (!printk_ratelimit()) - return; - - if (module_status < MLX5_MODULE_STATUS_ERROR) - mlx5_core_info(dev, - "Port module event: module %u, %s\n", - module_num, mlx5_pme_status[module_status - 1]); - - else if (module_status == MLX5_MODULE_STATUS_ERROR) - mlx5_core_info(dev, - "Port module event[error]: module %u, %s, %s\n", - module_num, mlx5_pme_status[module_status - 1], - mlx5_pme_error[error_type]); -} - int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size) { u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0}; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/qp.c b/drivers/net/ethernet/mellanox/mlx5/core/qp.c index 91b8139a388d..388f205a497f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/qp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/qp.c @@ -38,11 +38,11 @@ #include <linux/mlx5/transobj.h> #include "mlx5_core.h" +#include "lib/eq.h" -static struct mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, - u32 rsn) +static struct mlx5_core_rsc_common * +mlx5_get_rsc(struct mlx5_qp_table *table, u32 rsn) { - struct mlx5_qp_table *table = &dev->priv.qp_table; struct mlx5_core_rsc_common *common; spin_lock(&table->lock); @@ -53,11 +53,6 @@ static struct 
mlx5_core_rsc_common *mlx5_get_rsc(struct mlx5_core_dev *dev, spin_unlock(&table->lock); - if (!common) { - mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", - rsn); - return NULL; - } return common; } @@ -120,19 +115,57 @@ static bool is_event_type_allowed(int rsc_type, int event_type) } } -void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) +static int rsc_event_notifier(struct notifier_block *nb, + unsigned long type, void *data) { - struct mlx5_core_rsc_common *common = mlx5_get_rsc(dev, rsn); + struct mlx5_core_rsc_common *common; + struct mlx5_qp_table *table; + struct mlx5_core_dev *dev; struct mlx5_core_dct *dct; + u8 event_type = (u8)type; struct mlx5_core_qp *qp; + struct mlx5_priv *priv; + struct mlx5_eqe *eqe; + u32 rsn; + + switch (event_type) { + case MLX5_EVENT_TYPE_DCT_DRAINED: + eqe = data; + rsn = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff; + rsn |= (MLX5_RES_DCT << MLX5_USER_INDEX_LEN); + break; + case MLX5_EVENT_TYPE_PATH_MIG: + case MLX5_EVENT_TYPE_COMM_EST: + case MLX5_EVENT_TYPE_SQ_DRAINED: + case MLX5_EVENT_TYPE_SRQ_LAST_WQE: + case MLX5_EVENT_TYPE_WQ_CATAS_ERROR: + case MLX5_EVENT_TYPE_PATH_MIG_FAILED: + case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR: + case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR: + eqe = data; + rsn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff; + rsn |= (eqe->data.qp_srq.type << MLX5_USER_INDEX_LEN); + break; + default: + return NOTIFY_DONE; + } + + table = container_of(nb, struct mlx5_qp_table, nb); + priv = container_of(table, struct mlx5_priv, qp_table); + dev = container_of(priv, struct mlx5_core_dev, priv); + + mlx5_core_dbg(dev, "event (%d) arrived on resource 0x%x\n", eqe->type, rsn); - if (!common) - return; + common = mlx5_get_rsc(table, rsn); + if (!common) { + mlx5_core_warn(dev, "Async event for bogus resource 0x%x\n", rsn); + return NOTIFY_OK; + } if (!is_event_type_allowed((rsn >> MLX5_USER_INDEX_LEN), event_type)) { mlx5_core_warn(dev, "event 0x%.2x is not allowed on resource 0x%.8x\n", event_type, rsn); - return; + goto out; } switch (common->res) { @@ -150,8 +183,10 @@ void mlx5_rsc_event(struct mlx5_core_dev *dev, u32 rsn, int event_type) default: mlx5_core_warn(dev, "invalid resource type for 0x%x\n", rsn); } - +out: mlx5_core_put_rsc(common); + + return NOTIFY_OK; } static int create_resource_common(struct mlx5_core_dev *dev, @@ -487,10 +522,16 @@ void mlx5_init_qp_table(struct mlx5_core_dev *dev) spin_lock_init(&table->lock); INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); mlx5_qp_debugfs_init(dev); + + table->nb.notifier_call = rsc_event_notifier; + mlx5_notifier_register(dev, &table->nb); } void mlx5_cleanup_qp_table(struct mlx5_core_dev *dev) { + struct mlx5_qp_table *table = &dev->priv.qp_table; + + mlx5_notifier_unregister(dev, &table->nb); mlx5_qp_debugfs_cleanup(dev); } @@ -670,3 +711,20 @@ int mlx5_core_query_q_counter(struct mlx5_core_dev *dev, u16 counter_id, return mlx5_cmd_exec(dev, in, sizeof(in), out, out_size); } EXPORT_SYMBOL_GPL(mlx5_core_query_q_counter); + +struct mlx5_core_rsc_common *mlx5_core_res_hold(struct mlx5_core_dev *dev, + int res_num, + enum mlx5_res_type res_type) +{ + u32 rsn = res_num | (res_type << MLX5_USER_INDEX_LEN); + struct mlx5_qp_table *table = &dev->priv.qp_table; + + return mlx5_get_rsc(table, rsn); +} +EXPORT_SYMBOL_GPL(mlx5_core_res_hold); + +void mlx5_core_res_put(struct mlx5_core_rsc_common *res) +{ + mlx5_core_put_rsc(res); +} +EXPORT_SYMBOL_GPL(mlx5_core_res_put); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c 
b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index a0674962f02c..6e178030d8fb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -216,20 +216,10 @@ int mlx5_core_sriov_configure(struct pci_dev *pdev, int num_vfs) if (!mlx5_core_is_pf(dev)) return -EPERM; - if (num_vfs) { - int ret; - - ret = mlx5_lag_forbid(dev); - if (ret && (ret != -ENODEV)) - return ret; - } - - if (num_vfs) { + if (num_vfs) err = mlx5_sriov_enable(pdev, num_vfs); - } else { + else mlx5_sriov_disable(pdev); - mlx5_lag_allow(dev); - } return err ? err : num_vfs; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/srq.c b/drivers/net/ethernet/mellanox/mlx5/core/srq.c deleted file mode 100644 index 6a6fc9be01e6..000000000000 --- a/drivers/net/ethernet/mellanox/mlx5/core/srq.c +++ /dev/null @@ -1,716 +0,0 @@ -/* - * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/mlx5/driver.h> -#include <linux/mlx5/cmd.h> -#include <linux/mlx5/srq.h> -#include <rdma/ib_verbs.h> -#include "mlx5_core.h" -#include <linux/mlx5/transobj.h> - -void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type) -{ - struct mlx5_srq_table *table = &dev->priv.srq_table; - struct mlx5_core_srq *srq; - - spin_lock(&table->lock); - - srq = radix_tree_lookup(&table->tree, srqn); - if (srq) - atomic_inc(&srq->refcount); - - spin_unlock(&table->lock); - - if (!srq) { - mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn); - return; - } - - srq->event(srq, event_type); - - if (atomic_dec_and_test(&srq->refcount)) - complete(&srq->free); -} - -static int get_pas_size(struct mlx5_srq_attr *in) -{ - u32 log_page_size = in->log_page_size + 12; - u32 log_srq_size = in->log_size; - u32 log_rq_stride = in->wqe_shift; - u32 page_offset = in->page_offset; - u32 po_quanta = 1 << (log_page_size - 6); - u32 rq_sz = 1 << (log_srq_size + 4 + log_rq_stride); - u32 page_size = 1 << log_page_size; - u32 rq_sz_po = rq_sz + (page_offset * po_quanta); - u32 rq_num_pas = DIV_ROUND_UP(rq_sz_po, page_size); - - return rq_num_pas * sizeof(u64); -} - -static void set_wq(void *wq, struct mlx5_srq_attr *in) -{ - MLX5_SET(wq, wq, wq_signature, !!(in->flags - & MLX5_SRQ_FLAG_WQ_SIG)); - MLX5_SET(wq, wq, log_wq_pg_sz, in->log_page_size); - MLX5_SET(wq, wq, log_wq_stride, in->wqe_shift + 4); - MLX5_SET(wq, wq, log_wq_sz, in->log_size); - MLX5_SET(wq, wq, page_offset, in->page_offset); - MLX5_SET(wq, wq, lwm, in->lwm); - MLX5_SET(wq, wq, pd, in->pd); - MLX5_SET64(wq, wq, dbr_addr, in->db_record); -} - -static void set_srqc(void *srqc, struct mlx5_srq_attr *in) -{ - MLX5_SET(srqc, srqc, wq_signature, !!(in->flags - & MLX5_SRQ_FLAG_WQ_SIG)); - MLX5_SET(srqc, srqc, log_page_size, in->log_page_size); - MLX5_SET(srqc, srqc, log_rq_stride, in->wqe_shift); - MLX5_SET(srqc, srqc, log_srq_size, in->log_size); - MLX5_SET(srqc, srqc, page_offset, in->page_offset); - MLX5_SET(srqc, srqc, lwm, in->lwm); - MLX5_SET(srqc, srqc, pd, in->pd); - MLX5_SET64(srqc, srqc, dbr_addr, in->db_record); - MLX5_SET(srqc, srqc, xrcd, in->xrcd); - MLX5_SET(srqc, srqc, cqn, in->cqn); -} - -static void get_wq(void *wq, struct mlx5_srq_attr *in) -{ - if (MLX5_GET(wq, wq, wq_signature)) - in->flags &= MLX5_SRQ_FLAG_WQ_SIG; - in->log_page_size = MLX5_GET(wq, wq, log_wq_pg_sz); - in->wqe_shift = MLX5_GET(wq, wq, log_wq_stride) - 4; - in->log_size = MLX5_GET(wq, wq, log_wq_sz); - in->page_offset = MLX5_GET(wq, wq, page_offset); - in->lwm = MLX5_GET(wq, wq, lwm); - in->pd = MLX5_GET(wq, wq, pd); - in->db_record = MLX5_GET64(wq, wq, dbr_addr); -} - -static void get_srqc(void *srqc, struct mlx5_srq_attr *in) -{ - if (MLX5_GET(srqc, srqc, wq_signature)) - in->flags &= MLX5_SRQ_FLAG_WQ_SIG; - in->log_page_size = MLX5_GET(srqc, srqc, log_page_size); - in->wqe_shift = MLX5_GET(srqc, srqc, log_rq_stride); - in->log_size = MLX5_GET(srqc, srqc, log_srq_size); - in->page_offset = MLX5_GET(srqc, srqc, page_offset); - in->lwm = MLX5_GET(srqc, srqc, lwm); - in->pd = MLX5_GET(srqc, srqc, pd); - in->db_record = MLX5_GET64(srqc, srqc, dbr_addr); -} - -struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn) -{ - struct mlx5_srq_table *table = &dev->priv.srq_table; - struct mlx5_core_srq *srq; - - spin_lock(&table->lock); - - srq = radix_tree_lookup(&table->tree, srqn); - if (srq) - atomic_inc(&srq->refcount); - - spin_unlock(&table->lock); - - return 
srq; -} -EXPORT_SYMBOL(mlx5_core_get_srq); - -static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in) -{ - u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0}; - void *create_in; - void *srqc; - void *pas; - int pas_size; - int inlen; - int err; - - pas_size = get_pas_size(in); - inlen = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size; - create_in = kvzalloc(inlen, GFP_KERNEL); - if (!create_in) - return -ENOMEM; - - MLX5_SET(create_srq_in, create_in, uid, in->uid); - srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry); - pas = MLX5_ADDR_OF(create_srq_in, create_in, pas); - - set_srqc(srqc, in); - memcpy(pas, in->pas, pas_size); - - MLX5_SET(create_srq_in, create_in, opcode, - MLX5_CMD_OP_CREATE_SRQ); - - err = mlx5_cmd_exec(dev, create_in, inlen, create_out, - sizeof(create_out)); - kvfree(create_in); - if (!err) { - srq->srqn = MLX5_GET(create_srq_out, create_out, srqn); - srq->uid = in->uid; - } - - return err; -} - -static int destroy_srq_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq) -{ - u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0}; - u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0}; - - MLX5_SET(destroy_srq_in, srq_in, opcode, - MLX5_CMD_OP_DESTROY_SRQ); - MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn); - MLX5_SET(destroy_srq_in, srq_in, uid, srq->uid); - - return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), - srq_out, sizeof(srq_out)); -} - -static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - u16 lwm, int is_srq) -{ - u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0}; - u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0}; - - MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ); - MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ); - MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn); - MLX5_SET(arm_rq_in, srq_in, lwm, lwm); - MLX5_SET(arm_rq_in, srq_in, uid, srq->uid); - - return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), - srq_out, sizeof(srq_out)); -} - -static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out) -{ - u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0}; - u32 *srq_out; - void *srqc; - int err; - - srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL); - if (!srq_out) - return -ENOMEM; - - MLX5_SET(query_srq_in, srq_in, opcode, - MLX5_CMD_OP_QUERY_SRQ); - MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn); - err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in), - srq_out, MLX5_ST_SZ_BYTES(query_srq_out)); - if (err) - goto out; - - srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry); - get_srqc(srqc, out); - if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD) - out->flags |= MLX5_SRQ_FLAG_ERR; -out: - kvfree(srq_out); - return err; -} - -static int create_xrc_srq_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in) -{ - u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)]; - void *create_in; - void *xrc_srqc; - void *pas; - int pas_size; - int inlen; - int err; - - pas_size = get_pas_size(in); - inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size; - create_in = kvzalloc(inlen, GFP_KERNEL); - if (!create_in) - return -ENOMEM; - - MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid); - xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in, - xrc_srq_context_entry); - pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas); - - set_srqc(xrc_srqc, in); - MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index); - memcpy(pas, in->pas, pas_size); - 
MLX5_SET(create_xrc_srq_in, create_in, opcode, - MLX5_CMD_OP_CREATE_XRC_SRQ); - - memset(create_out, 0, sizeof(create_out)); - err = mlx5_cmd_exec(dev, create_in, inlen, create_out, - sizeof(create_out)); - if (err) - goto out; - - srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn); - srq->uid = in->uid; -out: - kvfree(create_in); - return err; -} - -static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq) -{ - u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0}; - u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0}; - - MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode, - MLX5_CMD_OP_DESTROY_XRC_SRQ); - MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, uid, srq->uid); - - return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, sizeof(xrcsrq_out)); -} - -static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq, u16 lwm) -{ - u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; - u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; - - MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); - MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); - MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm); - MLX5_SET(arm_xrc_srq_in, xrcsrq_in, uid, srq->uid); - - return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), - xrcsrq_out, sizeof(xrcsrq_out)); -} - -static int query_xrc_srq_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out) -{ - u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)]; - u32 *xrcsrq_out; - void *xrc_srqc; - int err; - - xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL); - if (!xrcsrq_out) - return -ENOMEM; - memset(xrcsrq_in, 0, sizeof(xrcsrq_in)); - - MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode, - MLX5_CMD_OP_QUERY_XRC_SRQ); - MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn); - - err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out, - MLX5_ST_SZ_BYTES(query_xrc_srq_out)); - if (err) - goto out; - - xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out, - xrc_srq_context_entry); - get_srqc(xrc_srqc, out); - if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD) - out->flags |= MLX5_SRQ_FLAG_ERR; - -out: - kvfree(xrcsrq_out); - return err; -} - -static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in) -{ - void *create_in; - void *rmpc; - void *wq; - int pas_size; - int inlen; - int err; - - pas_size = get_pas_size(in); - inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size; - create_in = kvzalloc(inlen, GFP_KERNEL); - if (!create_in) - return -ENOMEM; - - rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx); - wq = MLX5_ADDR_OF(rmpc, rmpc, wq); - - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); - MLX5_SET(create_rmp_in, create_in, uid, in->uid); - set_wq(wq, in); - memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size); - - err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn); - if (!err) - srq->uid = in->uid; - - kvfree(create_in); - return err; -} - -static int destroy_rmp_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq) -{ - u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {}; - u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {}; - - MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); - MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn); - MLX5_SET(destroy_rmp_in, in, uid, srq->uid); - 
return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int arm_rmp_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq, - u16 lwm) -{ - void *in; - void *rmpc; - void *wq; - void *bitmask; - int err; - - in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); - bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); - wq = MLX5_ADDR_OF(rmpc, rmpc, wq); - - MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); - MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn); - MLX5_SET(modify_rmp_in, in, uid, srq->uid); - MLX5_SET(wq, wq, lwm, lwm); - MLX5_SET(rmp_bitmask, bitmask, lwm, 1); - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); - - err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); - - kvfree(in); - return err; -} - -static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out) -{ - u32 *rmp_out; - void *rmpc; - int err; - - rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL); - if (!rmp_out) - return -ENOMEM; - - err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out); - if (err) - goto out; - - rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context); - get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out); - if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY) - out->flags |= MLX5_SRQ_FLAG_ERR; - -out: - kvfree(rmp_out); - return err; -} - -static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in) -{ - u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0}; - void *create_in; - void *xrqc; - void *wq; - int pas_size; - int inlen; - int err; - - pas_size = get_pas_size(in); - inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size; - create_in = kvzalloc(inlen, GFP_KERNEL); - if (!create_in) - return -ENOMEM; - - xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context); - wq = MLX5_ADDR_OF(xrqc, xrqc, wq); - - set_wq(wq, in); - memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size); - - if (in->type == IB_SRQT_TM) { - MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING); - if (in->flags & MLX5_SRQ_FLAG_RNDV) - MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV); - MLX5_SET(xrqc, xrqc, - tag_matching_topology_context.log_matching_list_sz, - in->tm_log_list_size); - } - MLX5_SET(xrqc, xrqc, user_index, in->user_index); - MLX5_SET(xrqc, xrqc, cqn, in->cqn); - MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ); - MLX5_SET(create_xrq_in, create_in, uid, in->uid); - err = mlx5_cmd_exec(dev, create_in, inlen, create_out, - sizeof(create_out)); - kvfree(create_in); - if (!err) { - srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn); - srq->uid = in->uid; - } - - return err; -} - -static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) -{ - u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0}; - - MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ); - MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn); - MLX5_SET(destroy_xrq_in, in, uid, srq->uid); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int arm_xrq_cmd(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq, - u16 lwm) -{ - u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0}; - u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0}; - - MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ); - MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ); - MLX5_SET(arm_rq_in, in, srq_number, srq->srqn); - MLX5_SET(arm_rq_in, 
in, lwm, lwm); - MLX5_SET(arm_rq_in, in, uid, srq->uid); - - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out) -{ - u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0}; - u32 *xrq_out; - int outlen = MLX5_ST_SZ_BYTES(query_xrq_out); - void *xrqc; - int err; - - xrq_out = kvzalloc(outlen, GFP_KERNEL); - if (!xrq_out) - return -ENOMEM; - - MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ); - MLX5_SET(query_xrq_in, in, xrqn, srq->srqn); - - err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen); - if (err) - goto out; - - xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context); - get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out); - if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD) - out->flags |= MLX5_SRQ_FLAG_ERR; - out->tm_next_tag = - MLX5_GET(xrqc, xrqc, - tag_matching_topology_context.append_next_index); - out->tm_hw_phase_cnt = - MLX5_GET(xrqc, xrqc, - tag_matching_topology_context.hw_phase_cnt); - out->tm_sw_phase_cnt = - MLX5_GET(xrqc, xrqc, - tag_matching_topology_context.sw_phase_cnt); - -out: - kvfree(xrq_out); - return err; -} - -static int create_srq_split(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in) -{ - if (!dev->issi) - return create_srq_cmd(dev, srq, in); - switch (srq->common.res) { - case MLX5_RES_XSRQ: - return create_xrc_srq_cmd(dev, srq, in); - case MLX5_RES_XRQ: - return create_xrq_cmd(dev, srq, in); - default: - return create_rmp_cmd(dev, srq, in); - } -} - -static int destroy_srq_split(struct mlx5_core_dev *dev, - struct mlx5_core_srq *srq) -{ - if (!dev->issi) - return destroy_srq_cmd(dev, srq); - switch (srq->common.res) { - case MLX5_RES_XSRQ: - return destroy_xrc_srq_cmd(dev, srq); - case MLX5_RES_XRQ: - return destroy_xrq_cmd(dev, srq); - default: - return destroy_rmp_cmd(dev, srq); - } -} - -int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *in) -{ - int err; - struct mlx5_srq_table *table = &dev->priv.srq_table; - - switch (in->type) { - case IB_SRQT_XRC: - srq->common.res = MLX5_RES_XSRQ; - break; - case IB_SRQT_TM: - srq->common.res = MLX5_RES_XRQ; - break; - default: - srq->common.res = MLX5_RES_SRQ; - } - - err = create_srq_split(dev, srq, in); - if (err) - return err; - - atomic_set(&srq->refcount, 1); - init_completion(&srq->free); - - spin_lock_irq(&table->lock); - err = radix_tree_insert(&table->tree, srq->srqn, srq); - spin_unlock_irq(&table->lock); - if (err) { - mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn); - goto err_destroy_srq_split; - } - - return 0; - -err_destroy_srq_split: - destroy_srq_split(dev, srq); - - return err; -} -EXPORT_SYMBOL(mlx5_core_create_srq); - -int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq) -{ - struct mlx5_srq_table *table = &dev->priv.srq_table; - struct mlx5_core_srq *tmp; - int err; - - spin_lock_irq(&table->lock); - tmp = radix_tree_delete(&table->tree, srq->srqn); - spin_unlock_irq(&table->lock); - if (!tmp) { - mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn); - return -EINVAL; - } - if (tmp != srq) { - mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn); - return -EINVAL; - } - - err = destroy_srq_split(dev, srq); - if (err) - return err; - - if (atomic_dec_and_test(&srq->refcount)) - complete(&srq->free); - wait_for_completion(&srq->free); - - return 0; -} -EXPORT_SYMBOL(mlx5_core_destroy_srq); - -int mlx5_core_query_srq(struct 
mlx5_core_dev *dev, struct mlx5_core_srq *srq, - struct mlx5_srq_attr *out) -{ - if (!dev->issi) - return query_srq_cmd(dev, srq, out); - switch (srq->common.res) { - case MLX5_RES_XSRQ: - return query_xrc_srq_cmd(dev, srq, out); - case MLX5_RES_XRQ: - return query_xrq_cmd(dev, srq, out); - default: - return query_rmp_cmd(dev, srq, out); - } -} -EXPORT_SYMBOL(mlx5_core_query_srq); - -int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq, - u16 lwm, int is_srq) -{ - if (!dev->issi) - return arm_srq_cmd(dev, srq, lwm, is_srq); - switch (srq->common.res) { - case MLX5_RES_XSRQ: - return arm_xrc_srq_cmd(dev, srq, lwm); - case MLX5_RES_XRQ: - return arm_xrq_cmd(dev, srq, lwm); - default: - return arm_rmp_cmd(dev, srq, lwm); - } -} -EXPORT_SYMBOL(mlx5_core_arm_srq); - -void mlx5_init_srq_table(struct mlx5_core_dev *dev) -{ - struct mlx5_srq_table *table = &dev->priv.srq_table; - - memset(table, 0, sizeof(*table)); - spin_lock_init(&table->lock); - INIT_RADIX_TREE(&table->tree, GFP_ATOMIC); -} - -void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev) -{ - /* nothing */ -} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c index a1ee9a8a769e..c4d4b76096dc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/transobj.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/transobj.c @@ -258,115 +258,6 @@ void mlx5_core_destroy_tis(struct mlx5_core_dev *dev, u32 tisn) } EXPORT_SYMBOL(mlx5_core_destroy_tis); -int mlx5_core_create_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen, - u32 *rmpn) -{ - u32 out[MLX5_ST_SZ_DW(create_rmp_out)] = {0}; - int err; - - MLX5_SET(create_rmp_in, in, opcode, MLX5_CMD_OP_CREATE_RMP); - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); - if (!err) - *rmpn = MLX5_GET(create_rmp_out, out, rmpn); - - return err; -} - -int mlx5_core_modify_rmp(struct mlx5_core_dev *dev, u32 *in, int inlen) -{ - u32 out[MLX5_ST_SZ_DW(modify_rmp_out)] = {0}; - - MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP); - return mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); -} - -int mlx5_core_destroy_rmp(struct mlx5_core_dev *dev, u32 rmpn) -{ - u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_rmp_out)] = {0}; - - MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP); - MLX5_SET(destroy_rmp_in, in, rmpn, rmpn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, - sizeof(out)); -} - -int mlx5_core_query_rmp(struct mlx5_core_dev *dev, u32 rmpn, u32 *out) -{ - u32 in[MLX5_ST_SZ_DW(query_rmp_in)] = {0}; - int outlen = MLX5_ST_SZ_BYTES(query_rmp_out); - - MLX5_SET(query_rmp_in, in, opcode, MLX5_CMD_OP_QUERY_RMP); - MLX5_SET(query_rmp_in, in, rmpn, rmpn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, outlen); -} - -int mlx5_core_arm_rmp(struct mlx5_core_dev *dev, u32 rmpn, u16 lwm) -{ - void *in; - void *rmpc; - void *wq; - void *bitmask; - int err; - - in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL); - if (!in) - return -ENOMEM; - - rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx); - bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask); - wq = MLX5_ADDR_OF(rmpc, rmpc, wq); - - MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY); - MLX5_SET(modify_rmp_in, in, rmpn, rmpn); - MLX5_SET(wq, wq, lwm, lwm); - MLX5_SET(rmp_bitmask, bitmask, lwm, 1); - MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY); - - err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in)); - - kvfree(in); - - return err; -} - -int mlx5_core_create_xsrq(struct mlx5_core_dev *dev, u32 
*in, int inlen, - u32 *xsrqn) -{ - u32 out[MLX5_ST_SZ_DW(create_xrc_srq_out)] = {0}; - int err; - - MLX5_SET(create_xrc_srq_in, in, opcode, MLX5_CMD_OP_CREATE_XRC_SRQ); - err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out)); - if (!err) - *xsrqn = MLX5_GET(create_xrc_srq_out, out, xrc_srqn); - - return err; -} - -int mlx5_core_destroy_xsrq(struct mlx5_core_dev *dev, u32 xsrqn) -{ - u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0}; - - MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ); - MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, xsrqn); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - -int mlx5_core_arm_xsrq(struct mlx5_core_dev *dev, u32 xsrqn, u16 lwm) -{ - u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0}; - u32 out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0}; - - MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ); - MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, xsrqn); - MLX5_SET(arm_xrc_srq_in, in, lwm, lwm); - MLX5_SET(arm_xrc_srq_in, in, op_mod, - MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ); - return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out)); -} - int mlx5_core_create_rqt(struct mlx5_core_dev *dev, u32 *in, int inlen, u32 *rqtn) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c index cfbea66b4879..9b150ce9d315 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c @@ -1204,9 +1204,19 @@ EXPORT_SYMBOL_GPL(mlx5_nic_vport_unaffiliate_multiport); u64 mlx5_query_nic_system_image_guid(struct mlx5_core_dev *mdev) { - if (!mdev->sys_image_guid) - mlx5_query_nic_vport_system_image_guid(mdev, &mdev->sys_image_guid); + int port_type_cap = MLX5_CAP_GEN(mdev, port_type); + u64 tmp = 0; - return mdev->sys_image_guid; + if (mdev->sys_image_guid) + return mdev->sys_image_guid; + + if (port_type_cap == MLX5_CAP_PORT_TYPE_ETH) + mlx5_query_nic_vport_system_image_guid(mdev, &tmp); + else + mlx5_query_hca_vport_system_image_guid(mdev, &tmp); + + mdev->sys_image_guid = tmp; + + return tmp; } EXPORT_SYMBOL_GPL(mlx5_query_nic_system_image_guid); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c index 2dcbf1ebfd6a..953cc8efba69 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c @@ -155,7 +155,8 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param, void *cqc, struct mlx5_cqwq *wq, struct mlx5_wq_ctrl *wq_ctrl) { - u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) + 6; + /* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */ + u8 log_wq_stride = MLX5_GET(cqc, cqc, cqe_sz) == CQE_STRIDE_64 ? 
6 : 7; u8 log_wq_sz = MLX5_GET(cqc, cqc, log_cq_size); int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h index b1293d153a58..ea934a48c90a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h @@ -177,9 +177,14 @@ static inline u32 mlx5_cqwq_get_ci(struct mlx5_cqwq *wq) return mlx5_cqwq_ctr2ix(wq, wq->cc); } -static inline void *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) +static inline struct mlx5_cqe64 *mlx5_cqwq_get_wqe(struct mlx5_cqwq *wq, u32 ix) { - return mlx5_frag_buf_get_wqe(&wq->fbc, ix); + struct mlx5_cqe64 *cqe = mlx5_frag_buf_get_wqe(&wq->fbc, ix); + + /* For 128B CQEs the data is in the last 64B */ + cqe += wq->fbc.log_stride == 7; + + return cqe; } static inline u32 mlx5_cqwq_get_ctr_wrap_cnt(struct mlx5_cqwq *wq, u32 ctr) diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig index 8a291eb36c64..080ddd1942ec 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig +++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig @@ -80,6 +80,7 @@ config MLXSW_SPECTRUM depends on IPV6_GRE || IPV6_GRE=n select GENERIC_ALLOCATOR select PARMAN + select OBJAGG select MLXFW default m ---help--- diff --git a/drivers/net/ethernet/mellanox/mlxsw/Makefile b/drivers/net/ethernet/mellanox/mlxsw/Makefile index 1f77e97e2d7a..bbf45f10c208 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/Makefile +++ b/drivers/net/ethernet/mellanox/mlxsw/Makefile @@ -20,7 +20,7 @@ mlxsw_spectrum-objs := spectrum.o spectrum_buffers.o \ spectrum_acl_tcam.o spectrum_acl_ctcam.o \ spectrum_acl_atcam.o spectrum_acl_erp.o \ spectrum1_acl_tcam.o spectrum2_acl_tcam.o \ - spectrum_acl.o \ + spectrum_acl_bloom_filter.o spectrum_acl.o \ spectrum_flower.o spectrum_cnt.o \ spectrum_fid.o spectrum_ipip.o \ spectrum_acl_flex_actions.o \ diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c index f7154f358f27..ddedf8ab5b64 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core.c @@ -970,10 +970,11 @@ static const struct devlink_ops mlxsw_devlink_ops = { .sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get, }; -int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, - const struct mlxsw_bus *mlxsw_bus, - void *bus_priv, bool reload, - struct devlink *devlink) +static int +__mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, + const struct mlxsw_bus *mlxsw_bus, + void *bus_priv, bool reload, + struct devlink *devlink) { const char *device_kind = mlxsw_bus_info->device_kind; struct mlxsw_core *mlxsw_core; @@ -1040,6 +1041,12 @@ int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, goto err_devlink_register; } + if (mlxsw_driver->params_register && !reload) { + err = mlxsw_driver->params_register(mlxsw_core); + if (err) + goto err_register_params; + } + err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon); if (err) goto err_hwmon_init; @@ -1062,6 +1069,9 @@ err_driver_init: err_thermal_init: mlxsw_hwmon_fini(mlxsw_core->hwmon); err_hwmon_init: + if (mlxsw_driver->params_unregister && !reload) + mlxsw_driver->params_unregister(mlxsw_core); +err_register_params: if (!reload) devlink_unregister(devlink); err_devlink_register: @@ -1081,6 +1091,29 @@ err_bus_init: err_devlink_alloc: return err; } + +int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info, + 
const struct mlxsw_bus *mlxsw_bus, + void *bus_priv, bool reload, + struct devlink *devlink) +{ + bool called_again = false; + int err; + +again: + err = __mlxsw_core_bus_device_register(mlxsw_bus_info, mlxsw_bus, + bus_priv, reload, devlink); + /* -EAGAIN is returned in case the FW was updated. FW needs + * a reset, so let's try to call __mlxsw_core_bus_device_register() + * again. + */ + if (err == -EAGAIN && !called_again) { + called_again = true; + goto again; + } + + return err; +} EXPORT_SYMBOL(mlxsw_core_bus_device_register); void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, @@ -1102,6 +1135,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, mlxsw_core->driver->fini(mlxsw_core); mlxsw_thermal_fini(mlxsw_core->thermal); mlxsw_hwmon_fini(mlxsw_core->hwmon); + if (mlxsw_core->driver->params_unregister && !reload) + mlxsw_core->driver->params_unregister(mlxsw_core); if (!reload) devlink_unregister(devlink); mlxsw_emad_fini(mlxsw_core); @@ -1114,6 +1149,8 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core, return; reload_fail_deinit: + if (mlxsw_core->driver->params_unregister) + mlxsw_core->driver->params_unregister(mlxsw_core); devlink_unregister(devlink); devlink_resources_unregister(devlink, NULL); devlink_free(devlink); diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.h b/drivers/net/ethernet/mellanox/mlxsw/core.h index c4e4971764e5..4e114f35ee0d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core.h @@ -282,6 +282,8 @@ struct mlxsw_driver { const struct mlxsw_config_profile *profile, u64 *p_single_size, u64 *p_double_size, u64 *p_linear_size); + int (*params_register)(struct mlxsw_core *mlxsw_core); + void (*params_unregister)(struct mlxsw_core *mlxsw_core); u8 txhdr_len; const struct mlxsw_config_profile *profile; bool res_query_enabled; diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c index 785bf01fe2be..df78d23b3ec3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c @@ -426,15 +426,17 @@ mlxsw_sp_afk_encode_one(const struct mlxsw_afk_element_inst *elinst, void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, - char *key, char *mask, int block_start, int block_end) + char *key, char *mask) { + unsigned int blocks_count = + mlxsw_afk_key_info_blocks_count_get(key_info); char block_mask[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; char block_key[MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE]; const struct mlxsw_afk_element_inst *elinst; enum mlxsw_afk_element element; int block_index, i; - for (i = block_start; i <= block_end; i++) { + for (i = 0; i < blocks_count; i++) { memset(block_key, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); memset(block_mask, 0, MLXSW_SP_AFK_KEY_BLOCK_MAX_SIZE); @@ -451,10 +453,18 @@ void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, values->storage.mask); } - if (key) - mlxsw_afk->ops->encode_block(block_key, i, key); - if (mask) - mlxsw_afk->ops->encode_block(block_mask, i, mask); + mlxsw_afk->ops->encode_block(key, i, block_key); + mlxsw_afk->ops->encode_block(mask, i, block_mask); } } EXPORT_SYMBOL(mlxsw_afk_encode); + +void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key, + int block_start, int block_end) +{ + int i; + + for (i = block_start; i <= block_end; i++) + mlxsw_afk->ops->clear_block(key, i); +} +EXPORT_SYMBOL(mlxsw_afk_clear);
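/* A hedged illustration of the API change above: mlxsw_afk_encode() lost
 * its block_start/block_end arguments and now always encodes every block
 * of the key, while the new mlxsw_afk_clear() zeroes a block range after
 * the fact. The caller below is hypothetical; only the two prototypes
 * are taken from this patch.
 */
static void example_build_key(struct mlxsw_afk *afk,
			      struct mlxsw_afk_key_info *key_info,
			      struct mlxsw_afk_element_values *values,
			      char *key, char *mask,
			      int clear_start, int clear_end)
{
	/* Encode all blocks of both key and mask in one go. */
	mlxsw_afk_encode(afk, key_info, values, key, mask);
	/* Zero the blocks that should not participate in the lookup,
	 * e.g. blocks represented by an A-TCAM delta.
	 */
	mlxsw_afk_clear(afk, key, clear_start, clear_end);
}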
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h index c29c045d826d..4a625cdf3e7c 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h +++ b/drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h @@ -33,6 +33,8 @@ enum mlxsw_afk_element { MLXSW_AFK_ELEMENT_IP_TTL_, MLXSW_AFK_ELEMENT_IP_ECN, MLXSW_AFK_ELEMENT_IP_DSCP, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, MLXSW_AFK_ELEMENT_MAX, }; @@ -87,6 +89,8 @@ static const struct mlxsw_afk_element_info mlxsw_afk_element_infos[] = { MLXSW_AFK_ELEMENT_INFO_U32(IP_TTL_, 0x18, 0, 8), MLXSW_AFK_ELEMENT_INFO_U32(IP_ECN, 0x18, 9, 2), MLXSW_AFK_ELEMENT_INFO_U32(IP_DSCP, 0x18, 11, 6), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_8_10, 0x18, 17, 3), + MLXSW_AFK_ELEMENT_INFO_U32(VIRT_ROUTER_0_7, 0x18, 20, 8), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_96_127, 0x20, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_64_95, 0x24, 4), MLXSW_AFK_ELEMENT_INFO_BUF(SRC_IP_32_63, 0x28, 4), @@ -188,7 +192,8 @@ struct mlxsw_afk; struct mlxsw_afk_ops { const struct mlxsw_afk_block *blocks; unsigned int blocks_count; - void (*encode_block)(char *block, int block_index, char *output); + void (*encode_block)(char *output, int block_index, char *block); + void (*clear_block)(char *output, int block_index); }; struct mlxsw_afk *mlxsw_afk_create(unsigned int max_blocks, @@ -228,6 +233,8 @@ void mlxsw_afk_values_add_buf(struct mlxsw_afk_element_values *values, void mlxsw_afk_encode(struct mlxsw_afk *mlxsw_afk, struct mlxsw_afk_key_info *key_info, struct mlxsw_afk_element_values *values, - char *key, char *mask, int block_start, int block_end); + char *key, char *mask); +void mlxsw_afk_clear(struct mlxsw_afk *mlxsw_afk, char *key, + int block_start, int block_end); #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c index 6d29dc428608..61f897b40f82 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c +++ b/drivers/net/ethernet/mellanox/mlxsw/core_thermal.c @@ -16,6 +16,15 @@ #define MLXSW_THERMAL_MAX_TEMP 110000 /* 110C */ #define MLXSW_THERMAL_MAX_STATE 10 #define MLXSW_THERMAL_MAX_DUTY 255 +/* Minimum and maximum allowed fan speed in percent: from 20% to 100%. Values + * MLXSW_THERMAL_MAX_STATE + x, where x is between 2 and 10, are used for + * setting the fan speed dynamic minimum. For example, if the value is set to + * 14 (40%), the cooling levels vector will be set to 4, 4, 4, 4, 4, 5, 6, 7, + * 8, 9, 10 to introduce PWM speed in percent: 40, 40, 40, 40, 40, 50, 60, 70, + * 80, 90, 100. + */
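/* A minimal, self-contained sketch of the arithmetic described in the
 * comment above; the function name is hypothetical and the constant 10
 * stands in for MLXSW_THERMAL_MAX_STATE. A request of 14 encodes
 * 14 - 10 = 4, i.e. a 40% PWM floor, so every cooling level below 4 is
 * raised to 4.
 */
static void example_set_fan_floor(u8 *cooling_levels, unsigned long state)
{
	unsigned long min_state = state - 10; /* MLXSW_THERMAL_MAX_STATE */
	unsigned long i;

	for (i = 0; i <= 10; i++)
		cooling_levels[i] = max(min_state, i);
}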
+#define MLXSW_THERMAL_SPEED_MIN (MLXSW_THERMAL_MAX_STATE + 2) +#define MLXSW_THERMAL_SPEED_MAX (MLXSW_THERMAL_MAX_STATE * 2) +#define MLXSW_THERMAL_SPEED_MIN_LEVEL 2 /* 20% */ struct mlxsw_thermal_trip { int type; @@ -68,6 +77,7 @@ struct mlxsw_thermal { const struct mlxsw_bus_info *bus_info; struct thermal_zone_device *tzdev; struct thermal_cooling_device *cdevs[MLXSW_MFCR_PWMS_MAX]; + u8 cooling_levels[MLXSW_THERMAL_MAX_STATE + 1]; struct mlxsw_thermal_trip trips[MLXSW_THERMAL_NUM_TRIPS]; enum thermal_device_mode mode; }; @@ -285,12 +295,51 @@ static int mlxsw_thermal_set_cur_state(struct thermal_cooling_device *cdev, struct mlxsw_thermal *thermal = cdev->devdata; struct device *dev = thermal->bus_info->dev; char mfsc_pl[MLXSW_REG_MFSC_LEN]; - int err, idx; + unsigned long cur_state, i; + int idx; + u8 duty; + int err; idx = mlxsw_get_cooling_device_idx(thermal, cdev); if (idx < 0) return idx; + /* Check whether this request is for changing the allowed fan dynamic + * minimum. If it is, update the cooling levels accordingly and update + * the state, if the current state is below the newly requested minimum + * state. For example, if the current state is 5 and the minimum state + * is to be changed from 4 to 6, thermal->cooling_levels[0 to 5] will + * all be changed from 4 to 6, and state 5 (thermal->cooling_levels[4]) + * will be overwritten. + */ + if (state >= MLXSW_THERMAL_SPEED_MIN && + state <= MLXSW_THERMAL_SPEED_MAX) { + state -= MLXSW_THERMAL_MAX_STATE; + for (i = 0; i <= MLXSW_THERMAL_MAX_STATE; i++) + thermal->cooling_levels[i] = max(state, i); + + mlxsw_reg_mfsc_pack(mfsc_pl, idx, 0); + err = mlxsw_reg_query(thermal->core, MLXSW_REG(mfsc), mfsc_pl); + if (err) + return err; + + duty = mlxsw_reg_mfsc_pwm_duty_cycle_get(mfsc_pl); + cur_state = mlxsw_duty_to_state(duty); + + /* If the current fan state is lower than the requested dynamic + * minimum, increase the fan speed up to the dynamic minimum. + */ + if (state < cur_state) + return 0; + + state = cur_state; + } + + if (state > MLXSW_THERMAL_MAX_STATE) + return -EINVAL; + + /* Normalize the state to the valid speed range. */ + state = thermal->cooling_levels[state]; mlxsw_reg_mfsc_pack(mfsc_pl, idx, mlxsw_state_to_duty(state)); err = mlxsw_reg_write(thermal->core, MLXSW_REG(mfsc), mfsc_pl); if (err) { @@ -369,6 +418,11 @@ int mlxsw_thermal_init(struct mlxsw_core *core, } } + /* Initialize cooling levels per PWM state. */ + for (i = 0; i < MLXSW_THERMAL_MAX_STATE; i++) + thermal->cooling_levels[i] = max(MLXSW_THERMAL_SPEED_MIN_LEVEL, + i); + thermal->tzdev = thermal_zone_device_register("mlxsw", MLXSW_THERMAL_NUM_TRIPS, MLXSW_THERMAL_TRIP_MASK, diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci.c b/drivers/net/ethernet/mellanox/mlxsw/pci.c index 5890fdfd62c3..66b8098c6fd2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/pci.c +++ b/drivers/net/ethernet/mellanox/mlxsw/pci.c @@ -1720,7 +1720,6 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { const char *driver_name = pdev->driver->name; struct mlxsw_pci *mlxsw_pci; - bool called_again = false; int err; mlxsw_pci = kzalloc(sizeof(*mlxsw_pci), GFP_KERNEL); @@ -1777,18 +1776,10 @@ static int mlxsw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mlxsw_pci->bus_info.dev = &pdev->dev; mlxsw_pci->id = id; -again: err = mlxsw_core_bus_device_register(&mlxsw_pci->bus_info, &mlxsw_pci_bus, mlxsw_pci, false, NULL); - /* -EAGAIN is returned in case the FW was updated. FW needs - * a reset, so lets try to call mlxsw_core_bus_device_register() - * again.
- */ - if (err == -EAGAIN && !called_again) { - called_again = true; - goto again; - } else if (err) { + if (err) { dev_err(&pdev->dev, "cannot register bus device\n"); goto err_bus_device_register; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h index db3d2790aeec..9b48dffc9f63 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/reg.h +++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h @@ -641,6 +641,10 @@ enum mlxsw_reg_sfn_rec_type { MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC = 0x7, /* Aged-out MAC address on a LAG port. */ MLXSW_REG_SFN_REC_TYPE_AGED_OUT_MAC_LAG = 0x8, + /* Learned unicast tunnel record. */ + MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL = 0xD, + /* Aged-out unicast tunnel record. */ + MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL = 0xE, }; /* reg_sfn_rec_type @@ -704,6 +708,66 @@ static inline void mlxsw_reg_sfn_mac_lag_unpack(char *payload, int rec_index, *p_lag_id = mlxsw_reg_sfn_mac_lag_lag_id_get(payload, rec_index); } +/* reg_sfn_uc_tunnel_uip_msb + * When protocol is IPv4, the most significant byte of the underlay IPv4 + * address of the remote VTEP. + * When protocol is IPv6, reserved. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_msb, MLXSW_REG_SFN_BASE_LEN, 24, + 8, MLXSW_REG_SFN_REC_LEN, 0x08, false); + +enum mlxsw_reg_sfn_uc_tunnel_protocol { + MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV4, + MLXSW_REG_SFN_UC_TUNNEL_PROTOCOL_IPV6, +}; + +/* reg_sfn_uc_tunnel_protocol + * IP protocol. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_protocol, MLXSW_REG_SFN_BASE_LEN, 27, + 1, MLXSW_REG_SFN_REC_LEN, 0x0C, false); + +/* reg_sfn_uc_tunnel_uip_lsb + * When protocol is IPv4, the least significant bytes of the underlay + * IPv4 address of the remote VTEP. + * When protocol is IPv6, ipv6_id to be queried from TNIPSD. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, uc_tunnel_uip_lsb, MLXSW_REG_SFN_BASE_LEN, 0, + 24, MLXSW_REG_SFN_REC_LEN, 0x0C, false); + +enum mlxsw_reg_sfn_tunnel_port { + MLXSW_REG_SFN_TUNNEL_PORT_NVE, + MLXSW_REG_SFN_TUNNEL_PORT_VPLS, + MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL0, + MLXSW_REG_SFN_TUNNEL_FLEX_TUNNEL1, +}; + +/* reg_sfn_uc_tunnel_port + * Tunnel port. + * Reserved on Spectrum. + * Access: RO + */ +MLXSW_ITEM32_INDEXED(reg, sfn, tunnel_port, MLXSW_REG_SFN_BASE_LEN, 0, 4, + MLXSW_REG_SFN_REC_LEN, 0x10, false); + +static inline void +mlxsw_reg_sfn_uc_tunnel_unpack(char *payload, int rec_index, char *mac, + u16 *p_fid, u32 *p_uip, + enum mlxsw_reg_sfn_uc_tunnel_protocol *p_proto) +{ + u32 uip_msb, uip_lsb; + + mlxsw_reg_sfn_rec_mac_memcpy_from(payload, rec_index, mac); + *p_fid = mlxsw_reg_sfn_mac_fid_get(payload, rec_index); + uip_msb = mlxsw_reg_sfn_uc_tunnel_uip_msb_get(payload, rec_index); + uip_lsb = mlxsw_reg_sfn_uc_tunnel_uip_lsb_get(payload, rec_index); + *p_uip = uip_msb << 24 | uip_lsb; + *p_proto = mlxsw_reg_sfn_uc_tunnel_protocol_get(payload, rec_index); +} + /* SPMS - Switch Port MSTP/RSTP State Register * ------------------------------------------- * Configures the spanning tree state of a physical port. @@ -2431,6 +2495,43 @@ static inline void mlxsw_reg_pefa_unpack(char *payload, bool *p_a) *p_a = mlxsw_reg_pefa_a_get(payload); } +/* PEMRBT - Policy-Engine Multicast Router Binding Table Register + * -------------------------------------------------------------- + * This register is used for binding a multicast router to an ACL group + * that serves the MC router. + * This register is not supported by SwitchX/-2 and Spectrum. + */
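/* Hypothetical usage sketch for the PEMRBT register defined below; the
 * pack helper and the mlxsw_reg_write() pattern are from this patch,
 * the calling function is illustrative only.
 */
static int example_bind_mrouter_group(struct mlxsw_core *core,
				      enum mlxsw_reg_pemrbt_protocol proto,
				      u16 acl_group_id)
{
	char pemrbt_pl[MLXSW_REG_PEMRBT_LEN];

	mlxsw_reg_pemrbt_pack(pemrbt_pl, proto, acl_group_id);
	return mlxsw_reg_write(core, MLXSW_REG(pemrbt), pemrbt_pl);
}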
+#define MLXSW_REG_PEMRBT_ID 0x3014 +#define MLXSW_REG_PEMRBT_LEN 0x14 + +MLXSW_REG_DEFINE(pemrbt, MLXSW_REG_PEMRBT_ID, MLXSW_REG_PEMRBT_LEN); + +enum mlxsw_reg_pemrbt_protocol { + MLXSW_REG_PEMRBT_PROTO_IPV4, + MLXSW_REG_PEMRBT_PROTO_IPV6, +}; + +/* reg_pemrbt_protocol + * Access: Index + */ +MLXSW_ITEM32(reg, pemrbt, protocol, 0x00, 0, 1); + +/* reg_pemrbt_group_id + * ACL group identifier. + * Range 0..cap_max_acl_groups-1 + * Access: RW + */ +MLXSW_ITEM32(reg, pemrbt, group_id, 0x10, 0, 16); + +static inline void +mlxsw_reg_pemrbt_pack(char *payload, enum mlxsw_reg_pemrbt_protocol protocol, + u16 group_id) +{ + MLXSW_REG_ZERO(pemrbt, payload); + mlxsw_reg_pemrbt_protocol_set(payload, protocol); + mlxsw_reg_pemrbt_group_id_set(payload, group_id); +} + /* PTCE-V2 - Policy-Engine TCAM Entry Register Version 2 * ----------------------------------------------------- * This register is used for accessing rules within a TCAM region. @@ -2642,7 +2743,7 @@ mlxsw_reg_perpt_pack(char *payload, u8 erpt_bank, u8 erpt_index, mlxsw_reg_perpt_erpt_bank_set(payload, erpt_bank); mlxsw_reg_perpt_erpt_index_set(payload, erpt_index); mlxsw_reg_perpt_key_size_set(payload, key_size); - mlxsw_reg_perpt_bf_bypass_set(payload, true); + mlxsw_reg_perpt_bf_bypass_set(payload, false); mlxsw_reg_perpt_erp_id_set(payload, erp_id); mlxsw_reg_perpt_erpt_base_bank_set(payload, erpt_base_bank); mlxsw_reg_perpt_erpt_base_index_set(payload, erpt_base_index); @@ -2834,8 +2935,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid, u32 priority, const char *tcam_region_info, const char *key, u8 erp_id, - bool large_exists, u32 lkey_id, - u32 action_pointer) + u16 delta_start, u8 delta_mask, + u8 delta_value, bool large_exists, + u32 lkey_id, u32 action_pointer) { MLXSW_REG_ZERO(ptce3, payload); mlxsw_reg_ptce3_v_set(payload, valid); @@ -2844,6 +2946,9 @@ static inline void mlxsw_reg_ptce3_pack(char *payload, bool valid, mlxsw_reg_ptce3_tcam_region_info_memcpy_to(payload, tcam_region_info); mlxsw_reg_ptce3_flex2_key_blocks_memcpy_to(payload, key); mlxsw_reg_ptce3_erp_id_set(payload, erp_id); + mlxsw_reg_ptce3_delta_start_set(payload, delta_start); + mlxsw_reg_ptce3_delta_mask_set(payload, delta_mask); + mlxsw_reg_ptce3_delta_value_set(payload, delta_value); mlxsw_reg_ptce3_large_exists_set(payload, large_exists); mlxsw_reg_ptce3_large_entry_key_id_set(payload, lkey_id); mlxsw_reg_ptce3_action_pointer_set(payload, action_pointer); @@ -2901,7 +3006,7 @@ static inline void mlxsw_reg_percr_pack(char *payload, u16 region_id) mlxsw_reg_percr_region_id_set(payload, region_id); mlxsw_reg_percr_atcam_ignore_prune_set(payload, false); mlxsw_reg_percr_ctcam_ignore_prune_set(payload, false); - mlxsw_reg_percr_bf_bypass_set(payload, true); + mlxsw_reg_percr_bf_bypass_set(payload, false); } /* PERERP - Policy-Engine Region eRP Register @@ -2990,6 +3095,72 @@ static inline void mlxsw_reg_pererp_pack(char *payload, u16 region_id, mlxsw_reg_pererp_master_rp_id_set(payload, master_rp_id); } +/* PEABFE - Policy-Engine Algorithmic Bloom Filter Entries Register + * ---------------------------------------------------------------- + * This register configures the Bloom filter entries.
+ */ +#define MLXSW_REG_PEABFE_ID 0x3022 +#define MLXSW_REG_PEABFE_BASE_LEN 0x10 +#define MLXSW_REG_PEABFE_BF_REC_LEN 0x4 +#define MLXSW_REG_PEABFE_BF_REC_MAX_COUNT 256 +#define MLXSW_REG_PEABFE_LEN (MLXSW_REG_PEABFE_BASE_LEN + \ + MLXSW_REG_PEABFE_BF_REC_LEN * \ + MLXSW_REG_PEABFE_BF_REC_MAX_COUNT) + +MLXSW_REG_DEFINE(peabfe, MLXSW_REG_PEABFE_ID, MLXSW_REG_PEABFE_LEN); + +/* reg_peabfe_size + * Number of BF entries to be updated. + * Range 1..256 + * Access: Op + */ +MLXSW_ITEM32(reg, peabfe, size, 0x00, 0, 9); + +/* reg_peabfe_bf_entry_state + * Bloom filter state + * 0 - Clear + * 1 - Set + * Access: RW + */ +MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_state, + MLXSW_REG_PEABFE_BASE_LEN, 31, 1, + MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false); + +/* reg_peabfe_bf_entry_bank + * Bloom filter bank ID + * Range 0..cap_max_erp_table_banks-1 + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_bank, + MLXSW_REG_PEABFE_BASE_LEN, 24, 4, + MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false); + +/* reg_peabfe_bf_entry_index + * Bloom filter entry index + * Range 0..2^cap_max_bf_log-1 + * Access: Index + */ +MLXSW_ITEM32_INDEXED(reg, peabfe, bf_entry_index, + MLXSW_REG_PEABFE_BASE_LEN, 0, 24, + MLXSW_REG_PEABFE_BF_REC_LEN, 0x00, false); + +static inline void mlxsw_reg_peabfe_pack(char *payload) +{ + MLXSW_REG_ZERO(peabfe, payload); +} + +static inline void mlxsw_reg_peabfe_rec_pack(char *payload, int rec_index, + u8 state, u8 bank, u32 bf_index) +{ + u8 num_rec = mlxsw_reg_peabfe_size_get(payload); + + if (rec_index >= num_rec) + mlxsw_reg_peabfe_size_set(payload, rec_index + 1); + mlxsw_reg_peabfe_bf_entry_state_set(payload, rec_index, state); + mlxsw_reg_peabfe_bf_entry_bank_set(payload, rec_index, bank); + mlxsw_reg_peabfe_bf_entry_index_set(payload, rec_index, bf_index); +} + /* IEDR - Infrastructure Entry Delete Register * ---------------------------------------------------- * This register is used for deleting entries from the entry tables. 
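/* Another hedged sketch, this time for the PEABFE register defined
 * above: a hypothetical caller batching Bloom filter entry updates.
 * Only the peabfe pack helpers and the usual mlxsw_reg_write() call are
 * taken from the patch; a real caller would likely heap-allocate the
 * ~1KB payload rather than place it on the stack.
 */
static int example_bf_entries_set(struct mlxsw_core *core, u8 bank,
				  const u32 *bf_indexes, int count, u8 state)
{
	char peabfe_pl[MLXSW_REG_PEABFE_LEN];
	int i;

	mlxsw_reg_peabfe_pack(peabfe_pl);
	/* Up to MLXSW_REG_PEABFE_BF_REC_MAX_COUNT (256) records fit in
	 * one write; rec_pack() bumps the size field as records are
	 * appended.
	 */
	for (i = 0; i < count; i++)
		mlxsw_reg_peabfe_rec_pack(peabfe_pl, i, state, bank,
					  bf_indexes[i]);
	return mlxsw_reg_write(core, MLXSW_REG(peabfe), peabfe_pl);
}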
@@ -4231,8 +4402,11 @@ MLXSW_ITEM32(reg, ppcnt, pnat, 0x00, 14, 2); enum mlxsw_reg_ppcnt_grp { MLXSW_REG_PPCNT_IEEE_8023_CNT = 0x0, + MLXSW_REG_PPCNT_RFC_2863_CNT = 0x1, MLXSW_REG_PPCNT_RFC_2819_CNT = 0x2, + MLXSW_REG_PPCNT_RFC_3635_CNT = 0x3, MLXSW_REG_PPCNT_EXT_CNT = 0x5, + MLXSW_REG_PPCNT_DISCARD_CNT = 0x6, MLXSW_REG_PPCNT_PRIO_CNT = 0x10, MLXSW_REG_PPCNT_TC_CNT = 0x11, MLXSW_REG_PPCNT_TC_CONG_TC = 0x13, @@ -4247,6 +4421,7 @@ enum mlxsw_reg_ppcnt_grp { * 0x2: RFC 2819 Counters * 0x3: RFC 3635 Counters * 0x5: Ethernet Extended Counters + * 0x6: Ethernet Discard Counters * 0x8: Link Level Retransmission Counters * 0x10: Per Priority Counters * 0x11: Per Traffic Class Counters @@ -4390,8 +4565,46 @@ MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_received, MLXSW_ITEM64(reg, ppcnt, a_pause_mac_ctrl_frames_transmitted, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x90, 0, 64); +/* Ethernet RFC 2863 Counter Group */ + +/* reg_ppcnt_if_in_discards + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, if_in_discards, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64); + +/* reg_ppcnt_if_out_discards + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, if_out_discards, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64); + +/* reg_ppcnt_if_out_errors + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, if_out_errors, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64); + /* Ethernet RFC 2819 Counter Group */ +/* reg_ppcnt_ether_stats_undersize_pkts + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_undersize_pkts, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64); + +/* reg_ppcnt_ether_stats_oversize_pkts + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_oversize_pkts, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x38, 0, 64); + +/* reg_ppcnt_ether_stats_fragments + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ether_stats_fragments, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64); + /* reg_ppcnt_ether_stats_pkts64octets * Access: RO */ @@ -4452,6 +4665,32 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts4096to8191octets, MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0xA0, 0, 64); +/* Ethernet RFC 3635 Counter Group */ + +/* reg_ppcnt_dot3stats_fcs_errors + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, dot3stats_fcs_errors, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); + +/* reg_ppcnt_dot3stats_symbol_errors + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, dot3stats_symbol_errors, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); + +/* reg_ppcnt_dot3control_in_unknown_opcodes + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, dot3control_in_unknown_opcodes, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x68, 0, 64); + +/* reg_ppcnt_dot3in_pause_frames + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, dot3in_pause_frames, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); + /* Ethernet Extended Counter Group Counters */ /* reg_ppcnt_ecn_marked @@ -4460,6 +4699,80 @@ MLXSW_ITEM64(reg, ppcnt, ether_stats_pkts8192to10239octets, MLXSW_ITEM64(reg, ppcnt, ecn_marked, MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); +/* Ethernet Discard Counter Group Counters */ + +/* reg_ppcnt_ingress_general + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ingress_general, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x00, 0, 64); + +/* reg_ppcnt_ingress_policy_engine + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ingress_policy_engine, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x08, 0, 64); + +/* reg_ppcnt_ingress_vlan_membership + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ingress_vlan_membership, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x10, 0, 64); + 
+/* reg_ppcnt_ingress_tag_frame_type + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ingress_tag_frame_type, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x18, 0, 64); + +/* reg_ppcnt_egress_vlan_membership + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, egress_vlan_membership, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x20, 0, 64); + +/* reg_ppcnt_loopback_filter + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, loopback_filter, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x28, 0, 64); + +/* reg_ppcnt_egress_general + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, egress_general, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x30, 0, 64); + +/* reg_ppcnt_egress_hoq + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, egress_hoq, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x40, 0, 64); + +/* reg_ppcnt_egress_policy_engine + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, egress_policy_engine, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x50, 0, 64); + +/* reg_ppcnt_ingress_tx_link_down + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, ingress_tx_link_down, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x58, 0, 64); + +/* reg_ppcnt_egress_stp_filter + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, egress_stp_filter, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x60, 0, 64); + +/* reg_ppcnt_egress_sll + * Access: RO + */ +MLXSW_ITEM64(reg, ppcnt, egress_sll, + MLXSW_REG_PPCNT_COUNTERS_OFFSET + 0x70, 0, 64); + /* Ethernet Per Priority Group Counters */ /* reg_ppcnt_rx_octets @@ -4862,6 +5175,7 @@ enum mlxsw_reg_htgt_trap_group { MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD, MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND, + MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR, }; /* reg_htgt_trap_group @@ -9357,8 +9671,10 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = { MLXSW_REG(ppbs), MLXSW_REG(prcr), MLXSW_REG(pefa), + MLXSW_REG(pemrbt), MLXSW_REG(ptce2), MLXSW_REG(perpt), + MLXSW_REG(peabfe), MLXSW_REG(perar), MLXSW_REG(ptce3), MLXSW_REG(percr), diff --git a/drivers/net/ethernet/mellanox/mlxsw/resources.h b/drivers/net/ethernet/mellanox/mlxsw/resources.h index 99b341539870..b8b3a01c2a9e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/resources.h +++ b/drivers/net/ethernet/mellanox/mlxsw/resources.h @@ -41,6 +41,7 @@ enum mlxsw_res_id { MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB, MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB, MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB, + MLXSW_RES_ID_ACL_MAX_BF_LOG, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, @@ -93,6 +94,7 @@ static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953, + [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index f84b9c02fcc5..eed1045e4d96 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -45,8 +45,8 @@ #define MLXSW_SP_FWREV_MINOR_TO_BRANCH(minor) ((minor) / 100) #define MLXSW_SP1_FWREV_MAJOR 13 -#define MLXSW_SP1_FWREV_MINOR 1703 -#define MLXSW_SP1_FWREV_SUBMINOR 4 +#define MLXSW_SP1_FWREV_MINOR 1910 +#define MLXSW_SP1_FWREV_SUBMINOR 622 #define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702 static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = { @@ -65,6 +65,13 @@ static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum"; static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2"; static const char mlxsw_sp_driver_version[] = 
"1.0"; +static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xfc, 0x00 +}; +static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = { + 0xff, 0xff, 0xff, 0xff, 0xf0, 0x00 +}; + /* tx_hdr_version * Tx header version. * Must be set to 1. @@ -323,6 +330,7 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev; const struct mlxsw_fw_rev *req_rev = mlxsw_sp->req_rev; const char *fw_filename = mlxsw_sp->fw_filename; + union devlink_param_value value; const struct firmware *firmware; int err; @@ -330,6 +338,15 @@ static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp) if (!req_rev || !fw_filename) return 0; + /* Don't check if devlink 'fw_load_policy' param is 'flash' */ + err = devlink_param_driverinit_value_get(priv_to_devlink(mlxsw_sp->core), + DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, + &value); + if (err) + return err; + if (value.vu8 == DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH) + return 0; + /* Validate driver & FW are compatible */ if (rev->major != req_rev->major) { WARN(1, "Mismatch in major FW version [%d:%d] is never expected; Please contact support\n", @@ -1123,22 +1140,40 @@ int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, return 0; } -static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port) +static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port, + bool flush_default) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp; list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp, - &mlxsw_sp_port->vlans_list, list) - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + &mlxsw_sp_port->vlans_list, list) { + if (!flush_default && + mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID) + continue; + mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); + } +} + +static void +mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) +{ + if (mlxsw_sp_port_vlan->bridge_port) + mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); + else if (mlxsw_sp_port_vlan->fid) + mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); } -static struct mlxsw_sp_port_vlan * +struct mlxsw_sp_port_vlan * mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) { struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - bool untagged = vid == 1; + bool untagged = vid == MLXSW_SP_DEFAULT_VID; int err; + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); + if (mlxsw_sp_port_vlan) + return ERR_PTR(-EEXIST); + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged); if (err) return ERR_PTR(err); @@ -1150,7 +1185,6 @@ mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) } mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port; - mlxsw_sp_port_vlan->ref_count = 1; mlxsw_sp_port_vlan->vid = vid; list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list); @@ -1161,46 +1195,17 @@ err_port_vlan_alloc: return ERR_PTR(err); } -static void -mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) +void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; u16 vid = mlxsw_sp_port_vlan->vid; + mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan); list_del(&mlxsw_sp_port_vlan->list); kfree(mlxsw_sp_port_vlan); mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); } -struct mlxsw_sp_port_vlan * -mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid) -{ - struct 
mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - - mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (mlxsw_sp_port_vlan) { - mlxsw_sp_port_vlan->ref_count++; - return mlxsw_sp_port_vlan; - } - - return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid); -} - -void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) -{ - struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; - - if (--mlxsw_sp_port_vlan->ref_count != 0) - return; - - if (mlxsw_sp_port_vlan->bridge_port) - mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); - else if (fid) - mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); - - mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); -} - static int mlxsw_sp_port_add_vid(struct net_device *dev, __be16 __always_unused proto, u16 vid) { @@ -1212,7 +1217,7 @@ static int mlxsw_sp_port_add_vid(struct net_device *dev, if (!vid) return 0; - return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid)); + return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid)); } static int mlxsw_sp_port_kill_vid(struct net_device *dev, @@ -1230,7 +1235,7 @@ static int mlxsw_sp_port_kill_vid(struct net_device *dev, mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (!mlxsw_sp_port_vlan) return 0; - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); return 0; } @@ -1342,7 +1347,6 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry; __be16 protocol = f->common.protocol; const struct tc_action *a; - LIST_HEAD(actions); int err; if (!tcf_exts_has_one_action(f->exts)) { @@ -1881,8 +1885,38 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = { #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2863_stats[] = { + { + .str = "if_in_discards", + .getter = mlxsw_reg_ppcnt_if_in_discards_get, + }, + { + .str = "if_out_discards", + .getter = mlxsw_reg_ppcnt_if_out_discards_get, + }, + { + .str = "if_out_errors", + .getter = mlxsw_reg_ppcnt_if_out_errors_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2863_stats) + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { { + .str = "ether_stats_undersize_pkts", + .getter = mlxsw_reg_ppcnt_ether_stats_undersize_pkts_get, + }, + { + .str = "ether_stats_oversize_pkts", + .getter = mlxsw_reg_ppcnt_ether_stats_oversize_pkts_get, + }, + { + .str = "ether_stats_fragments", + .getter = mlxsw_reg_ppcnt_ether_stats_fragments_get, + }, + { .str = "ether_pkts64octets", .getter = mlxsw_reg_ppcnt_ether_stats_pkts64octets_get, }, @@ -1927,6 +1961,82 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_2819_stats[] = { #define MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN \ ARRAY_SIZE(mlxsw_sp_port_hw_rfc_2819_stats) +static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_rfc_3635_stats[] = { + { + .str = "dot3stats_fcs_errors", + .getter = mlxsw_reg_ppcnt_dot3stats_fcs_errors_get, + }, + { + .str = "dot3stats_symbol_errors", + .getter = mlxsw_reg_ppcnt_dot3stats_symbol_errors_get, + }, + { + .str = "dot3control_in_unknown_opcodes", + .getter = mlxsw_reg_ppcnt_dot3control_in_unknown_opcodes_get, + }, + { + .str = "dot3in_pause_frames", + .getter = mlxsw_reg_ppcnt_dot3in_pause_frames_get, + }, +}; + +#define MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_rfc_3635_stats) + +static struct mlxsw_sp_port_hw_stats 
mlxsw_sp_port_hw_discard_stats[] = { + { + .str = "discard_ingress_general", + .getter = mlxsw_reg_ppcnt_ingress_general_get, + }, + { + .str = "discard_ingress_policy_engine", + .getter = mlxsw_reg_ppcnt_ingress_policy_engine_get, + }, + { + .str = "discard_ingress_vlan_membership", + .getter = mlxsw_reg_ppcnt_ingress_vlan_membership_get, + }, + { + .str = "discard_ingress_tag_frame_type", + .getter = mlxsw_reg_ppcnt_ingress_tag_frame_type_get, + }, + { + .str = "discard_egress_vlan_membership", + .getter = mlxsw_reg_ppcnt_egress_vlan_membership_get, + }, + { + .str = "discard_loopback_filter", + .getter = mlxsw_reg_ppcnt_loopback_filter_get, + }, + { + .str = "discard_egress_general", + .getter = mlxsw_reg_ppcnt_egress_general_get, + }, + { + .str = "discard_egress_hoq", + .getter = mlxsw_reg_ppcnt_egress_hoq_get, + }, + { + .str = "discard_egress_policy_engine", + .getter = mlxsw_reg_ppcnt_egress_policy_engine_get, + }, + { + .str = "discard_ingress_tx_link_down", + .getter = mlxsw_reg_ppcnt_ingress_tx_link_down_get, + }, + { + .str = "discard_egress_stp_filter", + .getter = mlxsw_reg_ppcnt_egress_stp_filter_get, + }, + { + .str = "discard_egress_sll", + .getter = mlxsw_reg_ppcnt_egress_sll_get, + }, +}; + +#define MLXSW_SP_PORT_HW_DISCARD_STATS_LEN \ + ARRAY_SIZE(mlxsw_sp_port_hw_discard_stats) + static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = { { .str = "rx_octets_prio", @@ -1979,7 +2089,10 @@ static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = { #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats) #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \ + MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN + \ MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN + \ + MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN + \ + MLXSW_SP_PORT_HW_DISCARD_STATS_LEN + \ (MLXSW_SP_PORT_HW_PRIO_STATS_LEN * \ IEEE_8021QAZ_MAX_TCS) + \ (MLXSW_SP_PORT_HW_TC_STATS_LEN * \ @@ -2020,12 +2133,31 @@ static void mlxsw_sp_port_get_strings(struct net_device *dev, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_2863_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; i++) { memcpy(p, mlxsw_sp_port_hw_rfc_2819_stats[i].str, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } + for (i = 0; i < MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_rfc_3635_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + + for (i = 0; i < MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; i++) { + memcpy(p, mlxsw_sp_port_hw_discard_stats[i].str, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) mlxsw_sp_port_get_prio_strings(&p, i); @@ -2068,10 +2200,22 @@ mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats, *p_hw_stats = mlxsw_sp_port_hw_stats; *p_len = MLXSW_SP_PORT_HW_STATS_LEN; break; + case MLXSW_REG_PPCNT_RFC_2863_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_2863_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; + break; case MLXSW_REG_PPCNT_RFC_2819_CNT: *p_hw_stats = mlxsw_sp_port_hw_rfc_2819_stats; *p_len = MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; break; + case MLXSW_REG_PPCNT_RFC_3635_CNT: + *p_hw_stats = mlxsw_sp_port_hw_rfc_3635_stats; + *p_len = MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; + break; + case MLXSW_REG_PPCNT_DISCARD_CNT: + *p_hw_stats = mlxsw_sp_port_hw_discard_stats; + *p_len = MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; + break; case MLXSW_REG_PPCNT_PRIO_CNT: 
*p_hw_stats = mlxsw_sp_port_hw_prio_stats; *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN; @@ -2121,11 +2265,26 @@ static void mlxsw_sp_port_get_stats(struct net_device *dev, data, data_index); data_index = MLXSW_SP_PORT_HW_STATS_LEN; + /* RFC 2863 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2863_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_2863_STATS_LEN; + /* RFC 2819 Counters */ __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_2819_CNT, 0, data, data_index); data_index += MLXSW_SP_PORT_HW_RFC_2819_STATS_LEN; + /* RFC 3635 Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_RFC_3635_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_RFC_3635_STATS_LEN; + + /* Discard Counters */ + __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_DISCARD_CNT, 0, + data, data_index); + data_index += MLXSW_SP_PORT_HW_DISCARD_STATS_LEN; + /* Per-Priority Counters */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i, @@ -2892,7 +3051,7 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, mlxsw_sp_port->dev = dev; mlxsw_sp_port->mlxsw_sp = mlxsw_sp; mlxsw_sp_port->local_port = local_port; - mlxsw_sp_port->pvid = 1; + mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID; mlxsw_sp_port->split = split; mlxsw_sp_port->mapping.module = module; mlxsw_sp_port->mapping.width = width; @@ -3031,13 +3190,22 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, goto err_port_nve_init; } - mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); + err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n", + mlxsw_sp_port->local_port); + goto err_port_pvid_set; + } + + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, + MLXSW_SP_DEFAULT_VID); if (IS_ERR(mlxsw_sp_port_vlan)) { dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n", mlxsw_sp_port->local_port); err = PTR_ERR(mlxsw_sp_port_vlan); - goto err_port_vlan_get; + goto err_port_vlan_create; } + mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan; mlxsw_sp_port_switchdev_init(mlxsw_sp_port); mlxsw_sp->ports[local_port] = mlxsw_sp_port; @@ -3057,8 +3225,9 @@ static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port, err_register_netdev: mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); -err_port_vlan_get: + mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); +err_port_vlan_create: +err_port_pvid_set: mlxsw_sp_port_nve_fini(mlxsw_sp_port); err_port_nve_init: mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); @@ -3099,7 +3268,7 @@ static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port) unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */ mlxsw_sp->ports[local_port] = NULL; mlxsw_sp_port_switchdev_fini(mlxsw_sp_port); - mlxsw_sp_port_vlan_flush(mlxsw_sp_port); + mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true); mlxsw_sp_port_nve_fini(mlxsw_sp_port); mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port); mlxsw_sp_port_fids_fini(mlxsw_sp_port); @@ -3394,10 +3563,10 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port, return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } -static void mlxsw_sp_rx_listener_mr_mark_func(struct sk_buff *skb, +static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb, u8 local_port, void *priv) { - skb->offload_mr_fwd_mark = 1; + 
skb->offload_l3_fwd_mark = 1; skb->offload_fwd_mark = 1; return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv); } @@ -3445,8 +3614,8 @@ out: MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) -#define MLXSW_SP_RXL_MR_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ - MLXSW_RXL(mlxsw_sp_rx_listener_mr_mark_func, _trap_id, _action, \ +#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl) \ + MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \ _is_ctrl, SP_##_trap_group, DISCARD) #define MLXSW_SP_EVENTL(_func, _trap_id) \ @@ -3479,7 +3648,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { /* L3 traps */ MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false), MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false), - MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false), + MLXSW_SP_RXL_L3_MARK(LBERROR, MIRROR_TO_CPU, LBERROR, false), MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false), MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP, false), @@ -3523,7 +3692,7 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = { MLXSW_SP_RXL_MARK(IPV6_PIM, TRAP_TO_CPU, PIM, false), MLXSW_SP_RXL_MARK(RPF, TRAP_TO_CPU, RPF, false), MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false), - MLXSW_SP_RXL_MR_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), + MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false), /* NVE traps */ MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, ARP, false), MLXSW_SP_RXL_NO_MARK(NVE_DECAP_ARP, TRAP_TO_CPU, ARP, false), @@ -3554,6 +3723,7 @@ static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF: case MLXSW_REG_HTGT_TRAP_GROUP_SP_PIM: case MLXSW_REG_HTGT_TRAP_GROUP_SP_RPF: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: rate = 128; burst_size = 7; break; @@ -3639,6 +3809,7 @@ static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core) case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP: case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE: case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST: + case MLXSW_REG_HTGT_TRAP_GROUP_SP_LBERROR: priority = 1; tc = 1; break; @@ -3841,6 +4012,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_nve_init; } + err = mlxsw_sp_acl_init(mlxsw_sp); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); + goto err_acl_init; + } + err = mlxsw_sp_router_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n"); @@ -3858,12 +4035,6 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, goto err_netdev_notifier; } - err = mlxsw_sp_acl_init(mlxsw_sp); - if (err) { - dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n"); - goto err_acl_init; - } - err = mlxsw_sp_dpipe_init(mlxsw_sp); if (err) { dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n"); @@ -3881,12 +4052,12 @@ static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core, err_ports_create: mlxsw_sp_dpipe_fini(mlxsw_sp); err_dpipe_init: - mlxsw_sp_acl_fini(mlxsw_sp); -err_acl_init: unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); err_netdev_notifier: mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + mlxsw_sp_acl_fini(mlxsw_sp); +err_acl_init: mlxsw_sp_nve_fini(mlxsw_sp); err_nve_init: mlxsw_sp_afa_fini(mlxsw_sp); @@ -3922,6 +4093,7 @@ static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops; mlxsw_sp->nve_ops_arr = 
mlxsw_sp1_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -3937,6 +4109,7 @@ static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core, mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops; mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops; mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr; + mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask; return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info); } @@ -3947,9 +4120,9 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_ports_remove(mlxsw_sp); mlxsw_sp_dpipe_fini(mlxsw_sp); - mlxsw_sp_acl_fini(mlxsw_sp); unregister_netdevice_notifier(&mlxsw_sp->netdevice_nb); mlxsw_sp_router_fini(mlxsw_sp); + mlxsw_sp_acl_fini(mlxsw_sp); mlxsw_sp_nve_fini(mlxsw_sp); mlxsw_sp_afa_fini(mlxsw_sp); mlxsw_sp_counter_pool_fini(mlxsw_sp); @@ -3962,16 +4135,20 @@ static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core) mlxsw_sp_kvdl_fini(mlxsw_sp); } +/* Per-FID flood tables are used for both "true" 802.1D FIDs and emulated + * 802.1Q FIDs + */ +#define MLXSW_SP_FID_FLOOD_TABLE_SIZE (MLXSW_SP_FID_8021D_MAX + \ + VLAN_VID_MASK - 1) + static const struct mlxsw_config_profile mlxsw_sp1_config_profile = { .used_max_mid = 1, .max_mid = MLXSW_SP_MID_MAX, .used_flood_tables = 1, .used_flood_mode = 1, .flood_mode = 3, - .max_fid_offset_flood_tables = 3, - .fid_offset_flood_table_size = VLAN_N_VID - 1, .max_fid_flood_tables = 3, - .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, + .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, .used_max_ib_mc = 1, .max_ib_mc = 0, .used_max_pkey = 1, @@ -3994,10 +4171,8 @@ static const struct mlxsw_config_profile mlxsw_sp2_config_profile = { .used_flood_tables = 1, .used_flood_mode = 1, .flood_mode = 3, - .max_fid_offset_flood_tables = 3, - .fid_offset_flood_table_size = VLAN_N_VID - 1, .max_fid_flood_tables = 3, - .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX, + .fid_flood_table_size = MLXSW_SP_FID_FLOOD_TABLE_SIZE, .used_max_ib_mc = 1, .max_ib_mc = 0, .used_max_pkey = 1, @@ -4177,6 +4352,52 @@ static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core, return 0; } +static int +mlxsw_sp_devlink_param_fw_load_policy_validate(struct devlink *devlink, u32 id, + union devlink_param_value val, + struct netlink_ext_ack *extack) +{ + if ((val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER) && + (val.vu8 != DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_FLASH)) { + NL_SET_ERR_MSG_MOD(extack, "'fw_load_policy' must be 'driver' or 'flash'"); + return -EINVAL; + } + + return 0; +} + +static const struct devlink_param mlxsw_sp_devlink_params[] = { + DEVLINK_PARAM_GENERIC(FW_LOAD_POLICY, + BIT(DEVLINK_PARAM_CMODE_DRIVERINIT), + NULL, NULL, + mlxsw_sp_devlink_param_fw_load_policy_validate), +}; + +static int mlxsw_sp_params_register(struct mlxsw_core *mlxsw_core) +{ + struct devlink *devlink = priv_to_devlink(mlxsw_core); + union devlink_param_value value; + int err; + + err = devlink_params_register(devlink, mlxsw_sp_devlink_params, + ARRAY_SIZE(mlxsw_sp_devlink_params)); + if (err) + return err; + + value.vu8 = DEVLINK_PARAM_FW_LOAD_POLICY_VALUE_DRIVER; + devlink_param_driverinit_value_set(devlink, + DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY, + value); + return 0; +} + +static void mlxsw_sp_params_unregister(struct mlxsw_core *mlxsw_core) +{ + devlink_params_unregister(priv_to_devlink(mlxsw_core), + mlxsw_sp_devlink_params, + ARRAY_SIZE(mlxsw_sp_devlink_params)); +} + static struct mlxsw_driver mlxsw_sp1_driver = { .kind = mlxsw_sp1_driver_name, .priv_size = sizeof(struct mlxsw_sp), @@ -4198,6 +4419,8 
@@ static struct mlxsw_driver mlxsw_sp1_driver = { .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp1_resources_register, .kvd_sizes_get = mlxsw_sp_kvd_sizes_get, + .params_register = mlxsw_sp_params_register, + .params_unregister = mlxsw_sp_params_unregister, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp1_config_profile, .res_query_enabled = true, @@ -4223,6 +4446,8 @@ static struct mlxsw_driver mlxsw_sp2_driver = { .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get, .txhdr_construct = mlxsw_sp_txhdr_construct, .resources_register = mlxsw_sp2_resources_register, + .params_register = mlxsw_sp_params_register, + .params_unregister = mlxsw_sp_params_unregister, .txhdr_len = MLXSW_TXHDR_LEN, .profile = &mlxsw_sp2_config_profile, .res_query_enabled = true, @@ -4298,6 +4523,25 @@ void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port) dev_put(mlxsw_sp_port->dev); } +static void +mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port, + struct net_device *lag_dev) +{ + struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev); + struct net_device *upper_dev; + struct list_head *iter; + + if (netif_is_bridge_port(lag_dev)) + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev); + + netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) { + if (!netif_is_bridge_port(upper_dev)) + continue; + br_dev = netdev_master_upper_dev_get(upper_dev); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev); + } +} + static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id) { char sldr_pl[MLXSW_REG_SLDR_LEN]; @@ -4425,7 +4669,6 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, struct net_device *lag_dev) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; - struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; struct mlxsw_sp_upper *lag; u16 lag_id; u8 port_index; @@ -4459,9 +4702,8 @@ static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port, lag->ref_count++; /* Port is no longer usable as a router interface */ - mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); - if (mlxsw_sp_port_vlan->fid) - mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); + if (mlxsw_sp_port->default_vlan->fid) + mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); return 0; @@ -4489,7 +4731,12 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id); /* Any VLANs configured on the port are no longer valid */ - mlxsw_sp_port_vlan_flush(mlxsw_sp_port); + mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false); + mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan); + /* Make the LAG and its directly linked uppers leave bridges they + * are members of + */ + mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev); if (lag->ref_count == 1) mlxsw_sp_lag_destroy(mlxsw_sp, lag_id); @@ -4499,9 +4746,8 @@ static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port->lagged = 0; lag->ref_count--; - mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); /* Make sure untagged frames are allowed to ingress */ - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); } static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port, @@ -4579,7 +4825,7 @@ static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port) err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true); if (err) goto err_port_stp_set; - err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, 
VLAN_N_VID - 1, + err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, true, false); if (err) goto err_port_vlan_set; @@ -4611,7 +4857,7 @@ static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port) mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); - mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1, + mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2, false, false); mlxsw_sp_port_stp_set(mlxsw_sp_port, false); mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false); @@ -4631,6 +4877,30 @@ static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev) return num_vxlans > 1; } +static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev) +{ + DECLARE_BITMAP(vlans, VLAN_N_VID) = {0}; + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(br_dev, dev, iter) { + u16 pvid; + int err; + + if (!netif_is_vxlan(dev)) + continue; + + err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); + if (err || !pvid) + continue; + + if (test_and_set_bit(pvid, vlans)) + return false; + } + + return true; +} + static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, struct netlink_ext_ack *extack) { @@ -4639,13 +4909,15 @@ static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev, return false; } - if (br_vlan_enabled(br_dev)) { - NL_SET_ERR_MSG_MOD(extack, "VLAN filtering can not be enabled on a bridge with a VxLAN device"); + if (!br_vlan_enabled(br_dev) && + mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); return false; } - if (mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) { - NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge"); + if (br_vlan_enabled(br_dev) && + !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged"); return false; } @@ -4719,11 +4991,6 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port"); return -EINVAL; } - if (is_vlan_dev(upper_dev) && - vlan_dev_vlan_id(upper_dev) == 1) { - NL_SET_ERR_MSG_MOD(extack, "Creating a VLAN device with VID 1 is unsupported: VLAN 1 carries untagged traffic"); - return -EINVAL; - } break; case NETDEV_CHANGEUPPER: upper_dev = info->upper_dev; @@ -4752,6 +5019,16 @@ static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev, } else if (netif_is_macvlan(upper_dev)) { if (!info->linking) mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); + } else if (is_vlan_dev(upper_dev)) { + struct net_device *br_dev; + + if (!netif_is_bridge_port(upper_dev)) + break; + if (info->linking) + break; + br_dev = netdev_master_upper_dev_get(upper_dev); + mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, + br_dev); } break; } @@ -4908,6 +5185,48 @@ static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev, return 0; } +static int mlxsw_sp_netdevice_bridge_vlan_event(struct net_device *vlan_dev, + struct net_device *br_dev, + unsigned long event, void *ptr, + u16 vid) +{ + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(vlan_dev); + struct netdev_notifier_changeupper_info *info = ptr; + struct netlink_ext_ack *extack; + struct net_device *upper_dev; + + if (!mlxsw_sp) + return 0; + + extack = netdev_notifier_info_to_extack(&info->info); + + switch (event) { + case NETDEV_PRECHANGEUPPER: + upper_dev = info->upper_dev; + if 
(!netif_is_macvlan(upper_dev)) { + NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type"); + return -EOPNOTSUPP; + } + if (!info->linking) + break; + if (netif_is_macvlan(upper_dev) && + !mlxsw_sp_rif_find_by_dev(mlxsw_sp, vlan_dev)) { + NL_SET_ERR_MSG_MOD(extack, "macvlan is only supported on top of router interfaces"); + return -EOPNOTSUPP; + } + break; + case NETDEV_CHANGEUPPER: + upper_dev = info->upper_dev; + if (info->linking) + break; + if (netif_is_macvlan(upper_dev)) + mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev); + break; + } + + return 0; +} + static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, unsigned long event, void *ptr) { @@ -4921,6 +5240,9 @@ static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev, return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev, real_dev, event, ptr, vid); + else if (netif_is_bridge_master(real_dev)) + return mlxsw_sp_netdevice_bridge_vlan_event(vlan_dev, real_dev, + event, ptr, vid); return 0; } @@ -5020,10 +5342,21 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, if (cu_info->linking) { if (!netif_running(dev)) return 0; + /* When the bridge is VLAN-aware, the VNI of the VxLAN + * device needs to be mapped to a VLAN, but at this + * point no VLANs are configured on the VxLAN device + */ + if (br_vlan_enabled(upper_dev)) + return 0; return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, - dev, extack); + dev, 0, extack); } else { - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev); + /* VLANs were already flushed, which triggered the + * necessary cleanup + */ + if (br_vlan_enabled(upper_dev)) + return 0; + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); } break; case NETDEV_PRE_UP: @@ -5034,7 +5367,7 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, return 0; if (!mlxsw_sp_lower_get(upper_dev)) return 0; - return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, + return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0, extack); case NETDEV_DOWN: upper_dev = netdev_master_upper_dev_get(dev); @@ -5044,7 +5377,7 @@ static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp, return 0; if (!mlxsw_sp_lower_get(upper_dev)) return 0; - mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, upper_dev, dev); + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev); break; } @@ -5075,8 +5408,10 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *nb, else if (mlxsw_sp_netdev_is_ipip_ul(mlxsw_sp, dev)) err = mlxsw_sp_netdevice_ipip_ul_event(mlxsw_sp, dev, event, ptr); - else if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU) - err = mlxsw_sp_netdevice_router_port_event(dev); + else if (event == NETDEV_PRE_CHANGEADDR || + event == NETDEV_CHANGEADDR || + event == NETDEV_CHANGEMTU) + err = mlxsw_sp_netdevice_router_port_event(dev, event, ptr); else if (mlxsw_sp_is_vrf_event(event, ptr)) err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr); else if (mlxsw_sp_port_dev_check(dev)) @@ -5097,18 +5432,10 @@ static struct notifier_block mlxsw_sp_inetaddr_valid_nb __read_mostly = { .notifier_call = mlxsw_sp_inetaddr_valid_event, }; -static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = { - .notifier_call = mlxsw_sp_inetaddr_event, -}; - static struct notifier_block mlxsw_sp_inet6addr_valid_nb __read_mostly = { .notifier_call = mlxsw_sp_inet6addr_valid_event, }; -static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = { - .notifier_call = mlxsw_sp_inet6addr_event, -}; - static const struct pci_device_id mlxsw_sp1_pci_id_table[] = { {PCI_VDEVICE(MELLANOX, 
PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0}, {0, }, @@ -5134,9 +5461,7 @@ static int __init mlxsw_sp_module_init(void) int err; register_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); - register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); register_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); - register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); err = mlxsw_core_driver_register(&mlxsw_sp1_driver); if (err) @@ -5163,9 +5488,7 @@ err_sp1_pci_driver_register: err_sp2_core_driver_register: mlxsw_core_driver_unregister(&mlxsw_sp1_driver); err_sp1_core_driver_register: - unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); - unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); return err; } @@ -5176,9 +5499,7 @@ static void __exit mlxsw_sp_module_exit(void) mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver); mlxsw_core_driver_unregister(&mlxsw_sp2_driver); mlxsw_core_driver_unregister(&mlxsw_sp1_driver); - unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb); unregister_inet6addr_validator_notifier(&mlxsw_sp_inet6addr_valid_nb); - unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb); unregister_inetaddr_validator_notifier(&mlxsw_sp_inetaddr_valid_nb); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h index 0875a79cbe7b..a1c32a81b011 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h @@ -8,6 +8,7 @@ #include <linux/netdevice.h> #include <linux/rhashtable.h> #include <linux/bitops.h> +#include <linux/if_bridge.h> #include <linux/if_vlan.h> #include <linux/list.h> #include <linux/dcbnl.h> @@ -24,6 +25,8 @@ #include "core_acl_flex_actions.h" #include "reg.h" +#define MLXSW_SP_DEFAULT_VID (VLAN_N_VID - 1) + #define MLXSW_SP_FID_8021D_MAX 1024 #define MLXSW_SP_MID_MAX 7000 @@ -80,6 +83,10 @@ enum mlxsw_sp_fid_type { MLXSW_SP_FID_TYPE_MAX, }; +enum mlxsw_sp_nve_type { + MLXSW_SP_NVE_TYPE_VXLAN, +}; + struct mlxsw_sp_mid { struct list_head list; unsigned char addr[ETH_ALEN]; @@ -127,6 +134,7 @@ struct mlxsw_sp { struct mlxsw_core *core; const struct mlxsw_bus_info *bus_info; unsigned char base_mac[ETH_ALEN]; + const unsigned char *mac_mask; struct mlxsw_sp_upper *lags; int *port_to_module; struct mlxsw_sp_sb *sb; @@ -184,7 +192,6 @@ struct mlxsw_sp_port_vlan { struct list_head list; struct mlxsw_sp_port *mlxsw_sp_port; struct mlxsw_sp_fid *fid; - unsigned int ref_count; u16 vid; struct mlxsw_sp_bridge_port *bridge_port; struct list_head bridge_vlan_node; @@ -235,6 +242,7 @@ struct mlxsw_sp_port { } periodic_hw_stats; struct mlxsw_sp_port_sample *sample; struct list_head vlans_list; + struct mlxsw_sp_port_vlan *default_vlan; struct mlxsw_sp_qdisc *root_qdisc; struct mlxsw_sp_qdisc *tclass_qdiscs; unsigned acl_rule_count; @@ -261,6 +269,26 @@ static inline bool mlxsw_sp_bridge_has_vxlan(struct net_device *br_dev) return !!mlxsw_sp_bridge_vxlan_dev_find(br_dev); } +static inline int +mlxsw_sp_vxlan_mapped_vid(const struct net_device *vxlan_dev, u16 *p_vid) +{ + struct bridge_vlan_info vinfo; + u16 vid = 0; + int err; + + err = br_vlan_get_pvid(vxlan_dev, &vid); + if (err || !vid) + goto out; + + err = br_vlan_get_info(vxlan_dev, vid, &vinfo); + if (err || !(vinfo.flags & BRIDGE_VLAN_INFO_UNTAGGED)) + vid = 0; + +out: + *p_vid = vid; + return err; +} + static inline bool mlxsw_sp_port_is_pause_en(const struct mlxsw_sp_port 
*mlxsw_sp_port) { @@ -358,11 +386,15 @@ bool mlxsw_sp_bridge_device_is_offloaded(const struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev); int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev, - const struct net_device *vxlan_dev, + const struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack); void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, const struct net_device *vxlan_dev); +struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + u16 vid, + struct netlink_ext_ack *extack); +extern struct notifier_block mlxsw_sp_switchdev_notifier; /* spectrum.c */ int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port, @@ -384,8 +416,8 @@ int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid, bool learn_enable); int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); struct mlxsw_sp_port_vlan * -mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); -void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); +mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); +void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin, u16 vid_end, bool is_member, bool untagged); int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp, @@ -429,15 +461,12 @@ union mlxsw_sp_l3addr { int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp); void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp); -int mlxsw_sp_netdevice_router_port_event(struct net_device *dev); +int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, + unsigned long event, void *ptr); void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, const struct net_device *macvlan_dev); -int mlxsw_sp_inetaddr_event(struct notifier_block *unused, - unsigned long event, void *ptr); int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); -int mlxsw_sp_inet6addr_event(struct notifier_block *unused, - unsigned long event, void *ptr); int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, unsigned long event, void *ptr); int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, @@ -457,7 +486,6 @@ mlxsw_sp_netdevice_ipip_ul_event(struct mlxsw_sp *mlxsw_sp, struct netdev_notifier_info *info); void mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan); -void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); void mlxsw_sp_rif_destroy_by_dev(struct mlxsw_sp *mlxsw_sp, struct net_device *dev); struct mlxsw_sp_rif *mlxsw_sp_rif_find_by_dev(const struct mlxsw_sp *mlxsw_sp, @@ -538,6 +566,7 @@ struct mlxsw_sp_acl_rule_info { unsigned int priority; struct mlxsw_afk_element_values values; struct mlxsw_afa_block *act_block; + u8 action_created:1; unsigned int counter_index; }; @@ -547,6 +576,7 @@ struct mlxsw_sp_acl_ruleset; /* spectrum_acl.c */ enum mlxsw_sp_acl_profile { MLXSW_SP_ACL_PROFILE_FLOWER, + MLXSW_SP_ACL_PROFILE_MR, }; struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl); @@ -581,7 +611,8 @@ void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp, u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset); struct mlxsw_sp_acl_rule_info * -mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl); +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl, + struct mlxsw_afa_block *afa_block); void 
mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei); void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei, @@ -625,6 +656,7 @@ struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, unsigned long cookie, + struct mlxsw_afa_block *afa_block, struct netlink_ext_ack *extack); void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule); @@ -632,6 +664,9 @@ int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule); void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule *rule); +int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule, + struct mlxsw_afa_block *afa_block); struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, @@ -676,6 +711,10 @@ struct mlxsw_sp_acl_tcam_ops { void (*entry_del)(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *chunk_priv, void *entry_priv); + int (*entry_action_replace)(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei); int (*entry_activity_get)(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *entry_priv, bool *activity); @@ -721,6 +760,12 @@ int mlxsw_sp_setup_tc_prio(struct mlxsw_sp_port *mlxsw_sp_port, struct tc_prio_qopt_offload *p); /* spectrum_fid.c */ +bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid); +struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp, + u16 fid_index); +int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex); +int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid, + enum mlxsw_sp_nve_type *p_type); struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp, __be32 vni); int mlxsw_sp_fid_vni(const struct mlxsw_sp_fid *fid, __be32 *vni); @@ -728,9 +773,12 @@ int mlxsw_sp_fid_nve_flood_index_set(struct mlxsw_sp_fid *fid, u32 nve_flood_index); void mlxsw_sp_fid_nve_flood_index_clear(struct mlxsw_sp_fid *fid); bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid); -int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni); +int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type, + __be32 vni, int nve_ifindex); void mlxsw_sp_fid_vni_clear(struct mlxsw_sp_fid *fid); bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid); +void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid, + const struct net_device *nve_dev); int mlxsw_sp_fid_flood_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_flood_type packet_type, u8 local_port, bool member); @@ -738,10 +786,10 @@ int mlxsw_sp_fid_port_vid_map(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid, struct mlxsw_sp_port *mlxsw_sp_port, u16 vid); -enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid); u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid); enum mlxsw_sp_fid_type mlxsw_sp_fid_type(const struct mlxsw_sp_fid *fid); void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif); +struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid); enum mlxsw_sp_rif_type mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_fid_type type); @@ -749,6 +797,8 @@ u16 mlxsw_sp_fid_8021q_vid(const struct 
mlxsw_sp_fid *fid); struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid); struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp, int br_ifindex); +struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp, + u16 vid); struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp, int br_ifindex); struct mlxsw_sp_fid *mlxsw_sp_fid_rfid_get(struct mlxsw_sp *mlxsw_sp, @@ -797,10 +847,6 @@ extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp1_mr_tcam_ops; extern const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops; /* spectrum_nve.c */ -enum mlxsw_sp_nve_type { - MLXSW_SP_NVE_TYPE_VXLAN, -}; - struct mlxsw_sp_nve_params { enum mlxsw_sp_nve_type type; __be32 vni; @@ -810,6 +856,9 @@ struct mlxsw_sp_nve_params { extern const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[]; extern const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[]; +int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr); int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, enum mlxsw_sp_l3proto proto, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c index 2a9eac90002e..fe270c1a26a6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum1_acl_tcam.c @@ -67,7 +67,7 @@ mlxsw_sp1_acl_ctcam_region_catchall_add(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_acl_ctcam_chunk_init(®ion->cregion, ®ion->catchall.cchunk, MLXSW_SP_ACL_TCAM_CATCHALL_PRIO); - rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, NULL); if (IS_ERR(rulei)) { err = PTR_ERR(rulei); goto err_rulei_create; @@ -193,6 +193,15 @@ static void mlxsw_sp1_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp1_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + return -EOPNOTSUPP; +} + +static int mlxsw_sp1_acl_tcam_region_entry_activity_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *_region, unsigned int offset, @@ -240,5 +249,6 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp1_acl_tcam_ops = { .entry_priv_size = sizeof(struct mlxsw_sp1_acl_tcam_entry), .entry_add = mlxsw_sp1_acl_tcam_entry_add, .entry_del = mlxsw_sp1_acl_tcam_entry_del, + .entry_action_replace = mlxsw_sp1_acl_tcam_entry_action_replace, .entry_activity_get = mlxsw_sp1_acl_tcam_entry_activity_get, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c index 8ca77f3e8f27..234ab51916db 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_acl_tcam.c @@ -34,15 +34,15 @@ mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregio { struct mlxsw_sp_acl_atcam_region *aregion; struct mlxsw_sp_acl_atcam_entry *aentry; - struct mlxsw_sp_acl_erp *erp; + struct mlxsw_sp_acl_erp_mask *erp_mask; aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion); aentry = mlxsw_sp_acl_tcam_centry_aentry(centry); - erp = mlxsw_sp_acl_erp_get(aregion, mask, true); - if (IS_ERR(erp)) - return PTR_ERR(erp); - aentry->erp = erp; + erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true); + if (IS_ERR(erp_mask)) + return PTR_ERR(erp_mask); + aentry->erp_mask = erp_mask; return 0; } @@ 
-57,7 +57,7 @@ mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregio aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion); aentry = mlxsw_sp_acl_tcam_centry_aentry(centry); - mlxsw_sp_acl_erp_put(aregion, aentry->erp); + mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask); } static const struct mlxsw_sp_acl_ctcam_region_ops @@ -211,6 +211,23 @@ static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + void *region_priv, void *chunk_priv, + void *entry_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp2_acl_tcam_region *region = region_priv; + struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv; + struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv; + + entry->act_block = rulei->act_block; + return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp, + ®ion->aregion, + &chunk->achunk, + &entry->aentry, rulei); +} + +static int mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, void *region_priv, void *entry_priv, bool *activity) @@ -235,5 +252,6 @@ const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = { .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry), .entry_add = mlxsw_sp2_acl_tcam_entry_add, .entry_del = mlxsw_sp2_acl_tcam_entry_del, + .entry_action_replace = mlxsw_sp2_acl_tcam_entry_action_replace, .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c index 4dd62478162e..e31ec75ac035 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum2_mr_tcam.c @@ -7,6 +7,201 @@ #include "spectrum.h" #include "spectrum_mr.h" +struct mlxsw_sp2_mr_tcam { + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_acl_block *acl_block; + struct mlxsw_sp_acl_ruleset *ruleset4; + struct mlxsw_sp_acl_ruleset *ruleset6; +}; + +struct mlxsw_sp2_mr_route { + struct mlxsw_sp2_mr_tcam *mr_tcam; +}; + +static struct mlxsw_sp_acl_ruleset * +mlxsw_sp2_mr_tcam_proto_ruleset(struct mlxsw_sp2_mr_tcam *mr_tcam, + enum mlxsw_sp_l3proto proto) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mr_tcam->ruleset4; + case MLXSW_SP_L3_PROTO_IPV6: + return mr_tcam->ruleset6; + } + return NULL; +} + +static int mlxsw_sp2_mr_tcam_bind_group(struct mlxsw_sp *mlxsw_sp, + enum mlxsw_reg_pemrbt_protocol protocol, + struct mlxsw_sp_acl_ruleset *ruleset) +{ + char pemrbt_pl[MLXSW_REG_PEMRBT_LEN]; + u16 group_id; + + group_id = mlxsw_sp_acl_ruleset_group_id(ruleset); + + mlxsw_reg_pemrbt_pack(pemrbt_pl, protocol, group_id); + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pemrbt), pemrbt_pl); +} + +static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv4[] = { + MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_0_31, +}; + +static int mlxsw_sp2_mr_tcam_ipv4_init(struct mlxsw_sp2_mr_tcam *mr_tcam) +{ + struct mlxsw_afk_element_usage elusage; + int err; + + /* Initialize IPv4 ACL group. 
*/ + mlxsw_afk_element_usage_fill(&elusage, + mlxsw_sp2_mr_tcam_usage_ipv4, + ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv4)); + mr_tcam->ruleset4 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp, + mr_tcam->acl_block, + MLXSW_SP_L3_PROTO_IPV4, + MLXSW_SP_ACL_PROFILE_MR, + &elusage); + + if (IS_ERR(mr_tcam->ruleset4)) + return PTR_ERR(mr_tcam->ruleset4); + + /* MC Router groups should be bound before routes are inserted. */ + err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp, + MLXSW_REG_PEMRBT_PROTO_IPV4, + mr_tcam->ruleset4); + if (err) + goto err_bind_group; + + return 0; + +err_bind_group: + mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4); + return err; +} + +static void mlxsw_sp2_mr_tcam_ipv4_fini(struct mlxsw_sp2_mr_tcam *mr_tcam) +{ + mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset4); +} + +static const enum mlxsw_afk_element mlxsw_sp2_mr_tcam_usage_ipv6[] = { + MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + MLXSW_AFK_ELEMENT_SRC_IP_96_127, + MLXSW_AFK_ELEMENT_SRC_IP_64_95, + MLXSW_AFK_ELEMENT_SRC_IP_32_63, + MLXSW_AFK_ELEMENT_SRC_IP_0_31, + MLXSW_AFK_ELEMENT_DST_IP_96_127, + MLXSW_AFK_ELEMENT_DST_IP_64_95, + MLXSW_AFK_ELEMENT_DST_IP_32_63, + MLXSW_AFK_ELEMENT_DST_IP_0_31, +}; + +static int mlxsw_sp2_mr_tcam_ipv6_init(struct mlxsw_sp2_mr_tcam *mr_tcam) +{ + struct mlxsw_afk_element_usage elusage; + int err; + + /* Initialize IPv6 ACL group */ + mlxsw_afk_element_usage_fill(&elusage, + mlxsw_sp2_mr_tcam_usage_ipv6, + ARRAY_SIZE(mlxsw_sp2_mr_tcam_usage_ipv6)); + mr_tcam->ruleset6 = mlxsw_sp_acl_ruleset_get(mr_tcam->mlxsw_sp, + mr_tcam->acl_block, + MLXSW_SP_L3_PROTO_IPV6, + MLXSW_SP_ACL_PROFILE_MR, + &elusage); + + if (IS_ERR(mr_tcam->ruleset6)) + return PTR_ERR(mr_tcam->ruleset6); + + /* MC Router groups should be bound before routes are inserted. 
*/ + err = mlxsw_sp2_mr_tcam_bind_group(mr_tcam->mlxsw_sp, + MLXSW_REG_PEMRBT_PROTO_IPV6, + mr_tcam->ruleset6); + if (err) + goto err_bind_group; + + return 0; + +err_bind_group: + mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6); + return err; +} + +static void mlxsw_sp2_mr_tcam_ipv6_fini(struct mlxsw_sp2_mr_tcam *mr_tcam) +{ + mlxsw_sp_acl_ruleset_put(mr_tcam->mlxsw_sp, mr_tcam->ruleset6); +} + +static void +mlxsw_sp2_mr_tcam_rule_parse4(struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_mr_route_key *key) +{ + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + (char *) &key->source.addr4, + (char *) &key->source_mask.addr4, 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + (char *) &key->group.addr4, + (char *) &key->group_mask.addr4, 4); +} + +static void +mlxsw_sp2_mr_tcam_rule_parse6(struct mlxsw_sp_acl_rule_info *rulei, + struct mlxsw_sp_mr_route_key *key) +{ + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_96_127, + &key->source.addr6.s6_addr[0x0], + &key->source_mask.addr6.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_64_95, + &key->source.addr6.s6_addr[0x4], + &key->source_mask.addr6.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_32_63, + &key->source.addr6.s6_addr[0x8], + &key->source_mask.addr6.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_SRC_IP_0_31, + &key->source.addr6.s6_addr[0xc], + &key->source_mask.addr6.s6_addr[0xc], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_96_127, + &key->group.addr6.s6_addr[0x0], + &key->group_mask.addr6.s6_addr[0x0], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_64_95, + &key->group.addr6.s6_addr[0x4], + &key->group_mask.addr6.s6_addr[0x4], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_32_63, + &key->group.addr6.s6_addr[0x8], + &key->group_mask.addr6.s6_addr[0x8], 4); + mlxsw_sp_acl_rulei_keymask_buf(rulei, MLXSW_AFK_ELEMENT_DST_IP_0_31, + &key->group.addr6.s6_addr[0xc], + &key->group_mask.addr6.s6_addr[0xc], 4); +} + +static void +mlxsw_sp2_mr_tcam_rule_parse(struct mlxsw_sp_acl_rule *rule, + struct mlxsw_sp_mr_route_key *key, + unsigned int priority) +{ + struct mlxsw_sp_acl_rule_info *rulei; + + rulei = mlxsw_sp_acl_rule_rulei(rule); + rulei->priority = priority; + mlxsw_sp_acl_rulei_keymask_u32(rulei, MLXSW_AFK_ELEMENT_VIRT_ROUTER_0_7, + key->vrid, GENMASK(7, 0)); + mlxsw_sp_acl_rulei_keymask_u32(rulei, + MLXSW_AFK_ELEMENT_VIRT_ROUTER_8_10, + key->vrid >> 8, GENMASK(2, 0)); + switch (key->proto) { + case MLXSW_SP_L3_PROTO_IPV4: + return mlxsw_sp2_mr_tcam_rule_parse4(rulei, key); + case MLXSW_SP_L3_PROTO_IPV6: + return mlxsw_sp2_mr_tcam_rule_parse6(rulei, key); + } +} + static int mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv, @@ -14,7 +209,33 @@ mlxsw_sp2_mr_tcam_route_create(struct mlxsw_sp *mlxsw_sp, void *priv, struct mlxsw_afa_block *afa_block, enum mlxsw_sp_mr_route_prio prio) { + struct mlxsw_sp2_mr_route *mr_route = route_priv; + struct mlxsw_sp2_mr_tcam *mr_tcam = priv; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + int err; + + mr_route->mr_tcam = mr_tcam; + ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto); + if (WARN_ON(!ruleset)) + return -EINVAL; + + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, + (unsigned long) route_priv, afa_block, + NULL); + if (IS_ERR(rule)) + return PTR_ERR(rule); + + 
mlxsw_sp2_mr_tcam_rule_parse(rule, key, prio); + err = mlxsw_sp_acl_rule_add(mlxsw_sp, rule); + if (err) + goto err_rule_add; + return 0; + +err_rule_add: + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); + return err; } static void @@ -22,6 +243,21 @@ mlxsw_sp2_mr_tcam_route_destroy(struct mlxsw_sp *mlxsw_sp, void *priv, void *route_priv, struct mlxsw_sp_mr_route_key *key) { + struct mlxsw_sp2_mr_tcam *mr_tcam = priv; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + + ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto); + if (WARN_ON(!ruleset)) + return; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, + (unsigned long) route_priv); + if (WARN_ON(!rule)) + return; + + mlxsw_sp_acl_rule_del(mlxsw_sp, rule); + mlxsw_sp_acl_rule_destroy(mlxsw_sp, rule); } static int @@ -30,21 +266,64 @@ mlxsw_sp2_mr_tcam_route_update(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_mr_route_key *key, struct mlxsw_afa_block *afa_block) { - return 0; + struct mlxsw_sp2_mr_route *mr_route = route_priv; + struct mlxsw_sp2_mr_tcam *mr_tcam = mr_route->mr_tcam; + struct mlxsw_sp_acl_ruleset *ruleset; + struct mlxsw_sp_acl_rule *rule; + + ruleset = mlxsw_sp2_mr_tcam_proto_ruleset(mr_tcam, key->proto); + if (WARN_ON(!ruleset)) + return -EINVAL; + + rule = mlxsw_sp_acl_rule_lookup(mlxsw_sp, ruleset, + (unsigned long) route_priv); + if (WARN_ON(!rule)) + return -EINVAL; + + return mlxsw_sp_acl_rule_action_replace(mlxsw_sp, rule, afa_block); } static int mlxsw_sp2_mr_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv) { + struct mlxsw_sp2_mr_tcam *mr_tcam = priv; + int err; + + mr_tcam->mlxsw_sp = mlxsw_sp; + mr_tcam->acl_block = mlxsw_sp_acl_block_create(mlxsw_sp, NULL); + if (!mr_tcam->acl_block) + return -ENOMEM; + + err = mlxsw_sp2_mr_tcam_ipv4_init(mr_tcam); + if (err) + goto err_ipv4_init; + + err = mlxsw_sp2_mr_tcam_ipv6_init(mr_tcam); + if (err) + goto err_ipv6_init; + return 0; + +err_ipv6_init: + mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam); +err_ipv4_init: + mlxsw_sp_acl_block_destroy(mr_tcam->acl_block); + return err; } static void mlxsw_sp2_mr_tcam_fini(void *priv) { + struct mlxsw_sp2_mr_tcam *mr_tcam = priv; + + mlxsw_sp2_mr_tcam_ipv6_fini(mr_tcam); + mlxsw_sp2_mr_tcam_ipv4_fini(mr_tcam); + mlxsw_sp_acl_block_destroy(mr_tcam->acl_block); } const struct mlxsw_sp_mr_tcam_ops mlxsw_sp2_mr_tcam_ops = { + .priv_size = sizeof(struct mlxsw_sp2_mr_tcam), .init = mlxsw_sp2_mr_tcam_init, .fini = mlxsw_sp2_mr_tcam_fini, + .route_priv_size = sizeof(struct mlxsw_sp2_mr_route), .route_create = mlxsw_sp2_mr_tcam_route_create, .route_destroy = mlxsw_sp2_mr_tcam_route_destroy, .route_update = mlxsw_sp2_mr_tcam_route_update, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c index c4f9238591e6..695d33358988 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c @@ -435,7 +435,8 @@ u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset) } struct mlxsw_sp_acl_rule_info * -mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) +mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl, + struct mlxsw_afa_block *afa_block) { struct mlxsw_sp_acl_rule_info *rulei; int err; @@ -443,11 +444,18 @@ mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl) rulei = kzalloc(sizeof(*rulei), GFP_KERNEL); if (!rulei) return NULL; + + if (afa_block) { + rulei->act_block = afa_block; + return rulei; + } + rulei->act_block = mlxsw_afa_block_create(acl->mlxsw_sp->afa); if 
(IS_ERR(rulei->act_block)) { err = PTR_ERR(rulei->act_block); goto err_afa_block_create; } + rulei->action_created = 1; return rulei; err_afa_block_create: @@ -457,7 +465,8 @@ err_afa_block_create: void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei) { - mlxsw_afa_block_destroy(rulei->act_block); + if (rulei->action_created) + mlxsw_afa_block_destroy(rulei->act_block); kfree(rulei); } @@ -623,6 +632,7 @@ struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, unsigned long cookie, + struct mlxsw_afa_block *afa_block, struct netlink_ext_ack *extack) { const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; @@ -639,7 +649,7 @@ mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp, rule->cookie = cookie; rule->ruleset = ruleset; - rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl); + rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl, afa_block); if (IS_ERR(rule->rulei)) { err = PTR_ERR(rule->rulei); goto err_rulei_create; @@ -721,6 +731,21 @@ void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp, ops->rule_del(mlxsw_sp, rule->priv); } +int mlxsw_sp_acl_rule_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_rule *rule, + struct mlxsw_afa_block *afa_block) +{ + struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset; + const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops; + struct mlxsw_sp_acl_rule_info *rulei; + + rulei = mlxsw_sp_acl_rule_rulei(rule); + rulei->act_block = afa_block; + + return ops->rule_action_replace(mlxsw_sp, ruleset->priv, rule->priv, + rule->rulei); +} + struct mlxsw_sp_acl_rule * mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ruleset *ruleset, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c index 2dda028f94db..80fb268d51a5 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_atcam.c @@ -14,8 +14,8 @@ #include "spectrum_acl_tcam.h" #include "core_acl_flex_keys.h" -#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START 6 -#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END 11 +#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START 0 +#define MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END 5 struct mlxsw_sp_acl_atcam_lkey_id_ht_key { char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* MSB blocks */ @@ -34,7 +34,7 @@ struct mlxsw_sp_acl_atcam_region_ops { void (*fini)(struct mlxsw_sp_acl_atcam_region *aregion); struct mlxsw_sp_acl_atcam_lkey_id * (*lkey_id_get)(struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_rule_info *rulei, u8 erp_id); + char *enc_key, u8 erp_id); void (*lkey_id_put)(struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_atcam_lkey_id *lkey_id); }; @@ -64,7 +64,7 @@ static const struct rhashtable_params mlxsw_sp_acl_atcam_entries_ht_params = { static bool mlxsw_sp_acl_atcam_is_centry(const struct mlxsw_sp_acl_atcam_entry *aentry) { - return mlxsw_sp_acl_erp_is_ctcam_erp(aentry->erp); + return mlxsw_sp_acl_erp_mask_is_ctcam(aentry->erp_mask); } static int @@ -90,8 +90,7 @@ mlxsw_sp_acl_atcam_region_generic_fini(struct mlxsw_sp_acl_atcam_region *aregion static struct mlxsw_sp_acl_atcam_lkey_id * mlxsw_sp_acl_atcam_generic_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_rule_info *rulei, - u8 erp_id) + char *enc_key, u8 erp_id) { struct mlxsw_sp_acl_atcam_region_generic *region_generic; @@ -220,8 +219,7 @@ 
mlxsw_sp_acl_atcam_lkey_id_destroy(struct mlxsw_sp_acl_atcam_region *aregion, static struct mlxsw_sp_acl_atcam_lkey_id * mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_rule_info *rulei, - u8 erp_id) + char *enc_key, u8 erp_id) { struct mlxsw_sp_acl_atcam_region_12kb *region_12kb = aregion->priv; struct mlxsw_sp_acl_tcam_region *region = aregion->region; @@ -230,9 +228,10 @@ mlxsw_sp_acl_atcam_12kb_lkey_id_get(struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; - mlxsw_afk_encode(afk, region->key_info, &rulei->values, ht_key.enc_key, - NULL, MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_START, - MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_END); + memcpy(ht_key.enc_key, enc_key, sizeof(ht_key.enc_key)); + mlxsw_afk_clear(afk, ht_key.enc_key, + MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_START, + MLXSW_SP_ACL_ATCAM_LKEY_ID_BLOCK_CLEAR_END); ht_key.erp_id = erp_id; lkey_id = rhashtable_lookup_fast(®ion_12kb->lkey_ht, &ht_key, mlxsw_sp_acl_atcam_lkey_id_ht_params); @@ -324,6 +323,7 @@ mlxsw_sp_acl_atcam_region_init(struct mlxsw_sp *mlxsw_sp, aregion->region = region; aregion->atcam = atcam; mlxsw_sp_acl_atcam_region_type_init(aregion); + INIT_LIST_HEAD(&aregion->entries_list); err = rhashtable_init(&aregion->entries_ht, &mlxsw_sp_acl_atcam_entries_ht_params); @@ -357,6 +357,7 @@ void mlxsw_sp_acl_atcam_region_fini(struct mlxsw_sp_acl_atcam_region *aregion) mlxsw_sp_acl_erp_region_fini(aregion); aregion->ops->fini(aregion); rhashtable_destroy(&aregion->entries_ht); + WARN_ON(!list_empty(&aregion->entries_list)); } void mlxsw_sp_acl_atcam_chunk_init(struct mlxsw_sp_acl_atcam_region *aregion, @@ -379,7 +380,7 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_rule_info *rulei) { struct mlxsw_sp_acl_tcam_region *region = aregion->region; - u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp); + u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask); struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; char ptce3_pl[MLXSW_REG_PTCE3_LEN]; u32 kvdl_index, priority; @@ -389,7 +390,8 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, if (err) return err; - lkey_id = aregion->ops->lkey_id_get(aregion, rulei, erp_id); + lkey_id = aregion->ops->lkey_id_get(aregion, aentry->ht_key.enc_key, + erp_id); if (IS_ERR(lkey_id)) return PTR_ERR(lkey_id); aentry->lkey_id = lkey_id; @@ -398,6 +400,9 @@ mlxsw_sp_acl_atcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_WRITE, priority, region->tcam_region_info, aentry->ht_key.enc_key, erp_id, + aentry->delta_info.start, + aentry->delta_info.mask, + aentry->delta_info.value, refcount_read(&lkey_id->refcnt) != 1, lkey_id->id, kvdl_index); err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl); @@ -418,18 +423,51 @@ mlxsw_sp_acl_atcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, { struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id; struct mlxsw_sp_acl_tcam_region *region = aregion->region; - u8 erp_id = mlxsw_sp_acl_erp_id(aentry->erp); + u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask); + char *enc_key = aentry->ht_key.enc_key; char ptce3_pl[MLXSW_REG_PTCE3_LEN]; mlxsw_reg_ptce3_pack(ptce3_pl, false, MLXSW_REG_PTCE3_OP_WRITE_WRITE, 0, - region->tcam_region_info, aentry->ht_key.enc_key, - erp_id, refcount_read(&lkey_id->refcnt) != 1, + region->tcam_region_info, + enc_key, erp_id, + aentry->delta_info.start, + 
aentry->delta_info.mask,
+			     aentry->delta_info.value,
+			     refcount_read(&lkey_id->refcnt) != 1,
 			     lkey_id->id, 0);
 	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
 
 	aregion->ops->lkey_id_put(aregion, lkey_id);
 }
 
 static int
+mlxsw_sp_acl_atcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+					       struct mlxsw_sp_acl_atcam_region *aregion,
+					       struct mlxsw_sp_acl_atcam_entry *aentry,
+					       struct mlxsw_sp_acl_rule_info *rulei)
+{
+	struct mlxsw_sp_acl_atcam_lkey_id *lkey_id = aentry->lkey_id;
+	u8 erp_id = mlxsw_sp_acl_erp_mask_erp_id(aentry->erp_mask);
+	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
+	char ptce3_pl[MLXSW_REG_PTCE3_LEN];
+	u32 kvdl_index, priority;
+	int err;
+
+	err = mlxsw_sp_acl_tcam_priority_get(mlxsw_sp, rulei, &priority, true);
+	if (err)
+		return err;
+	kvdl_index = mlxsw_afa_block_first_kvdl_index(rulei->act_block);
+	mlxsw_reg_ptce3_pack(ptce3_pl, true, MLXSW_REG_PTCE3_OP_WRITE_UPDATE,
+			     priority, region->tcam_region_info,
+			     aentry->ht_key.enc_key, erp_id,
+			     aentry->delta_info.start,
+			     aentry->delta_info.mask,
+			     aentry->delta_info.value,
+			     refcount_read(&lkey_id->refcnt) != 1, lkey_id->id,
+			     kvdl_index);
+	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce3), ptce3_pl);
+}
+
+static int
 __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
 			       struct mlxsw_sp_acl_atcam_region *aregion,
 			       struct mlxsw_sp_acl_atcam_entry *aentry,
@@ -438,19 +476,36 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
 	struct mlxsw_sp_acl_tcam_region *region = aregion->region;
 	char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN] = { 0 };
 	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
-	struct mlxsw_sp_acl_erp *erp;
-	unsigned int blocks_count;
+	const struct mlxsw_sp_acl_erp_delta *delta;
+	struct mlxsw_sp_acl_erp_mask *erp_mask;
 	int err;
 
-	blocks_count = mlxsw_afk_key_info_blocks_count_get(region->key_info);
 	mlxsw_afk_encode(afk, region->key_info, &rulei->values,
-			 aentry->ht_key.enc_key, mask, 0, blocks_count - 1);
-
-	erp = mlxsw_sp_acl_erp_get(aregion, mask, false);
-	if (IS_ERR(erp))
-		return PTR_ERR(erp);
-	aentry->erp = erp;
-	aentry->ht_key.erp_id = mlxsw_sp_acl_erp_id(erp);
+			 aentry->full_enc_key, mask);
+
+	erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, false);
+	if (IS_ERR(erp_mask))
+		return PTR_ERR(erp_mask);
+	aentry->erp_mask = erp_mask;
+	aentry->ht_key.erp_id = mlxsw_sp_acl_erp_mask_erp_id(erp_mask);
+	memcpy(aentry->ht_key.enc_key, aentry->full_enc_key,
+	       sizeof(aentry->ht_key.enc_key));
+
+	/* Compute all needed delta information and clear the delta bits
+	 * from the encoded key.
+	 */
+	delta = mlxsw_sp_acl_erp_delta(aentry->erp_mask);
+	aentry->delta_info.start = mlxsw_sp_acl_erp_delta_start(delta);
+	aentry->delta_info.mask = mlxsw_sp_acl_erp_delta_mask(delta);
+	aentry->delta_info.value =
+		mlxsw_sp_acl_erp_delta_value(delta, aentry->full_enc_key);
+	mlxsw_sp_acl_erp_delta_clear(delta, aentry->ht_key.enc_key);
+
+	/* Add the rule to the list of A-TCAM rules, assuming it is
+	 * intended for the A-TCAM. If the rule does not fit into the
+	 * A-TCAM it will be removed from the list.
+	 */
+	list_add(&aentry->list, &aregion->entries_list);
 
 	/* We can't insert identical rules into the A-TCAM, so fail and
 	 * let the rule spill into C-TCAM
@@ -461,6 +516,13 @@ __mlxsw_sp_acl_atcam_entry_add,
 	if (err)
 		goto err_rhashtable_insert;
 
+	/* Bloom filter must be updated here, before inserting the rule into
+	 * the A-TCAM.
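+	 * Otherwise the HW could consult Bloom filter bits that were
+	 * never set for this eRP, skip the A-TCAM lookup and silently
+	 * miss the rule (a false negative).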
+	 */
+	err = mlxsw_sp_acl_erp_bf_insert(mlxsw_sp, aregion, erp_mask, aentry);
+	if (err)
+		goto err_bf_insert;
+
 	err = mlxsw_sp_acl_atcam_region_entry_insert(mlxsw_sp, aregion, aentry,
 						     rulei);
 	if (err)
@@ -469,10 +531,13 @@ __mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
 	return 0;
 
 err_rule_insert:
+	mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, erp_mask, aentry);
+err_bf_insert:
 	rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
 			       mlxsw_sp_acl_atcam_entries_ht_params);
 err_rhashtable_insert:
-	mlxsw_sp_acl_erp_put(aregion, erp);
+	list_del(&aentry->list);
+	mlxsw_sp_acl_erp_mask_put(aregion, erp_mask);
 	return err;
 }
 
@@ -482,9 +547,21 @@ __mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
 			       struct mlxsw_sp_acl_atcam_entry *aentry)
 {
 	mlxsw_sp_acl_atcam_region_entry_remove(mlxsw_sp, aregion, aentry);
+	mlxsw_sp_acl_erp_bf_remove(mlxsw_sp, aregion, aentry->erp_mask, aentry);
 	rhashtable_remove_fast(&aregion->entries_ht, &aentry->ht_node,
 			       mlxsw_sp_acl_atcam_entries_ht_params);
-	mlxsw_sp_acl_erp_put(aregion, aentry->erp);
+	list_del(&aentry->list);
+	mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
+}
+
+static int
+__mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+					  struct mlxsw_sp_acl_atcam_region *aregion,
+					  struct mlxsw_sp_acl_atcam_entry *aentry,
+					  struct mlxsw_sp_acl_rule_info *rulei)
+{
+	return mlxsw_sp_acl_atcam_region_entry_action_replace(mlxsw_sp, aregion,
+							      aentry, rulei);
 }
 
 int mlxsw_sp_acl_atcam_entry_add(struct mlxsw_sp *mlxsw_sp,
@@ -523,6 +600,29 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp,
 	__mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, aregion, aentry);
 }
 
+int
+mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
+					struct mlxsw_sp_acl_atcam_region *aregion,
+					struct mlxsw_sp_acl_atcam_chunk *achunk,
+					struct mlxsw_sp_acl_atcam_entry *aentry,
+					struct mlxsw_sp_acl_rule_info *rulei)
+{
+	int err;
+
+	if (mlxsw_sp_acl_atcam_is_centry(aentry))
+		err = mlxsw_sp_acl_ctcam_entry_action_replace(mlxsw_sp,
+							      &aregion->cregion,
+							      &achunk->cchunk,
+							      &aentry->centry,
+							      rulei);
+	else
+		err = __mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
+								aregion, aentry,
+								rulei);
+
+	return err;
+}
+
 int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp,
 			    struct mlxsw_sp_acl_atcam *atcam)
 {
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
new file mode 100644
index 000000000000..505b87846acc
--- /dev/null
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_bloom_filter.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
+/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
+
+#include <linux/errno.h>
+#include <linux/gfp.h>
+#include <linux/kernel.h>
+#include <linux/refcount.h>
+
+#include "spectrum.h"
+#include "spectrum_acl_tcam.h"
+
+struct mlxsw_sp_acl_bf {
+	unsigned int bank_size;
+	refcount_t refcnt[0];
+};
+
+/* Bloom filter uses a CRC-16 hash over chunks of data which contain 4 key
+ * blocks, eRP ID and region ID. In Spectrum-2, a region key is composed of up
+ * to 12 key blocks, so there can be up to 3 chunks in the Bloom filter key,
+ * depending on the actual number of key blocks used in the region.
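+ *
+ * For example, a region built from 12 key blocks needs
+ * 1 + ((12 - 1) / 4) = 3 chunks, while a region using 4 or fewer key
+ * blocks hashes just one chunk (the chunk_count computed in
+ * mlxsw_sp_acl_bf_key_encode() below).
+ *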
+ * The layout of the Bloom filter key is as follows:
+ *
+ * +-------------------------+------------------------+------------------------+
+ * | Chunk 2 Key blocks 11-8 | Chunk 1 Key blocks 7-4 | Chunk 0 Key blocks 3-0 |
+ * +-------------------------+------------------------+------------------------+
+ */
+#define MLXSW_BLOOM_KEY_CHUNKS 3
+#define MLXSW_BLOOM_KEY_LEN 69
+
+/* Each chunk size is 23 bytes. 18 bytes of it contain 4 key blocks, each is
+ * 36 bits, 2 bytes which hold eRP ID and region ID, and 3 bytes of zero
+ * padding.
+ * The layout of each chunk is as follows:
+ *
+ * +---------+----------------------+-----------------------------------+
+ * | 3 bytes |        2 bytes       |              18 bytes             |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 183:158 |  157:148  | 147:144  |               143:0               |
+ * +---------+-----------+----------+-----------------------------------+
+ * | 0       | region ID |  eRP ID  |      4 Key blocks (18 Bytes)      |
+ * +---------+-----------+----------+-----------------------------------+
+ */
+#define MLXSW_BLOOM_CHUNK_PAD_BYTES 3
+#define MLXSW_BLOOM_CHUNK_KEY_BYTES 18
+#define MLXSW_BLOOM_KEY_CHUNK_BYTES 23
+
+/* The offset of the key block within a chunk is 5 bytes as it comes after
+ * 3 bytes of zero padding and 16 bits of region ID and eRP ID.
+ */
+#define MLXSW_BLOOM_CHUNK_KEY_OFFSET 5
+
+/* Each chunk contains 4 key blocks. Chunk 2 uses key blocks 11-8,
+ * and we need to populate it with 4 key blocks copied from the entry encoded
+ * key. Since the encoded key contains padding, key block 11 starts at offset
+ * 2. Block 7 that is used in chunk 1 starts at offset 20 as 4 key blocks take
+ * 18 bytes.
+ * This array defines key offsets for easy access when copying key blocks from
+ * entry key to Bloom filter chunk.
+ */
+static const u8 chunk_key_offsets[MLXSW_BLOOM_KEY_CHUNKS] = {2, 20, 38};
+
+/* This table is just the CRC of each possible byte. It is
+ * computed, Msbit first, for the Bloom filter polynomial
+ * which is 0x8529 (1 + x^3 + x^5 + x^8 + x^10 + x^15 and
+ * the implicit x^16).
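+ *
+ * Each entry can be reproduced with a plain MSB-first bitwise CRC over
+ * a single byte; a minimal sketch (illustrative, not part of the
+ * patch):
+ *
+ *	u16 crc = (u16)byte << 8;
+ *	int i;
+ *
+ *	for (i = 0; i < 8; i++)
+ *		crc = (crc & 0x8000) ? (crc << 1) ^ 0x8529 : crc << 1;
+ *
+ * For instance, byte 0x01 reduces exactly once and yields 0x8529, the
+ * second entry below.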
+ */ +static const u16 mlxsw_sp_acl_bf_crc_tab[256] = { +0x0000, 0x8529, 0x8f7b, 0x0a52, 0x9bdf, 0x1ef6, 0x14a4, 0x918d, +0xb297, 0x37be, 0x3dec, 0xb8c5, 0x2948, 0xac61, 0xa633, 0x231a, +0xe007, 0x652e, 0x6f7c, 0xea55, 0x7bd8, 0xfef1, 0xf4a3, 0x718a, +0x5290, 0xd7b9, 0xddeb, 0x58c2, 0xc94f, 0x4c66, 0x4634, 0xc31d, +0x4527, 0xc00e, 0xca5c, 0x4f75, 0xdef8, 0x5bd1, 0x5183, 0xd4aa, +0xf7b0, 0x7299, 0x78cb, 0xfde2, 0x6c6f, 0xe946, 0xe314, 0x663d, +0xa520, 0x2009, 0x2a5b, 0xaf72, 0x3eff, 0xbbd6, 0xb184, 0x34ad, +0x17b7, 0x929e, 0x98cc, 0x1de5, 0x8c68, 0x0941, 0x0313, 0x863a, +0x8a4e, 0x0f67, 0x0535, 0x801c, 0x1191, 0x94b8, 0x9eea, 0x1bc3, +0x38d9, 0xbdf0, 0xb7a2, 0x328b, 0xa306, 0x262f, 0x2c7d, 0xa954, +0x6a49, 0xef60, 0xe532, 0x601b, 0xf196, 0x74bf, 0x7eed, 0xfbc4, +0xd8de, 0x5df7, 0x57a5, 0xd28c, 0x4301, 0xc628, 0xcc7a, 0x4953, +0xcf69, 0x4a40, 0x4012, 0xc53b, 0x54b6, 0xd19f, 0xdbcd, 0x5ee4, +0x7dfe, 0xf8d7, 0xf285, 0x77ac, 0xe621, 0x6308, 0x695a, 0xec73, +0x2f6e, 0xaa47, 0xa015, 0x253c, 0xb4b1, 0x3198, 0x3bca, 0xbee3, +0x9df9, 0x18d0, 0x1282, 0x97ab, 0x0626, 0x830f, 0x895d, 0x0c74, +0x91b5, 0x149c, 0x1ece, 0x9be7, 0x0a6a, 0x8f43, 0x8511, 0x0038, +0x2322, 0xa60b, 0xac59, 0x2970, 0xb8fd, 0x3dd4, 0x3786, 0xb2af, +0x71b2, 0xf49b, 0xfec9, 0x7be0, 0xea6d, 0x6f44, 0x6516, 0xe03f, +0xc325, 0x460c, 0x4c5e, 0xc977, 0x58fa, 0xddd3, 0xd781, 0x52a8, +0xd492, 0x51bb, 0x5be9, 0xdec0, 0x4f4d, 0xca64, 0xc036, 0x451f, +0x6605, 0xe32c, 0xe97e, 0x6c57, 0xfdda, 0x78f3, 0x72a1, 0xf788, +0x3495, 0xb1bc, 0xbbee, 0x3ec7, 0xaf4a, 0x2a63, 0x2031, 0xa518, +0x8602, 0x032b, 0x0979, 0x8c50, 0x1ddd, 0x98f4, 0x92a6, 0x178f, +0x1bfb, 0x9ed2, 0x9480, 0x11a9, 0x8024, 0x050d, 0x0f5f, 0x8a76, +0xa96c, 0x2c45, 0x2617, 0xa33e, 0x32b3, 0xb79a, 0xbdc8, 0x38e1, +0xfbfc, 0x7ed5, 0x7487, 0xf1ae, 0x6023, 0xe50a, 0xef58, 0x6a71, +0x496b, 0xcc42, 0xc610, 0x4339, 0xd2b4, 0x579d, 0x5dcf, 0xd8e6, +0x5edc, 0xdbf5, 0xd1a7, 0x548e, 0xc503, 0x402a, 0x4a78, 0xcf51, +0xec4b, 0x6962, 0x6330, 0xe619, 0x7794, 0xf2bd, 0xf8ef, 0x7dc6, +0xbedb, 0x3bf2, 0x31a0, 0xb489, 0x2504, 0xa02d, 0xaa7f, 0x2f56, +0x0c4c, 0x8965, 0x8337, 0x061e, 0x9793, 0x12ba, 0x18e8, 0x9dc1, +}; + +static u16 mlxsw_sp_acl_bf_crc_byte(u16 crc, u8 c) +{ + return (crc << 8) ^ mlxsw_sp_acl_bf_crc_tab[(crc >> 8) ^ c]; +} + +static u16 mlxsw_sp_acl_bf_crc(const u8 *buffer, size_t len) +{ + u16 crc = 0; + + while (len--) + crc = mlxsw_sp_acl_bf_crc_byte(crc, *buffer++); + return crc; +} + +static void +mlxsw_sp_acl_bf_key_encode(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry, + char *output, u8 *len) +{ + struct mlxsw_afk_key_info *key_info = aregion->region->key_info; + u8 chunk_index, chunk_count, block_count; + char *chunk = output; + __be16 erp_region_id; + + block_count = mlxsw_afk_key_info_blocks_count_get(key_info); + chunk_count = 1 + ((block_count - 1) >> 2); + erp_region_id = cpu_to_be16(aentry->ht_key.erp_id | + (aregion->region->id << 4)); + for (chunk_index = MLXSW_BLOOM_KEY_CHUNKS - chunk_count; + chunk_index < MLXSW_BLOOM_KEY_CHUNKS; chunk_index++) { + memset(chunk, 0, MLXSW_BLOOM_CHUNK_PAD_BYTES); + memcpy(chunk + MLXSW_BLOOM_CHUNK_PAD_BYTES, &erp_region_id, + sizeof(erp_region_id)); + memcpy(chunk + MLXSW_BLOOM_CHUNK_KEY_OFFSET, + &aentry->ht_key.enc_key[chunk_key_offsets[chunk_index]], + MLXSW_BLOOM_CHUNK_KEY_BYTES); + chunk += MLXSW_BLOOM_KEY_CHUNK_BYTES; + } + *len = chunk_count * MLXSW_BLOOM_KEY_CHUNK_BYTES; +} + +static unsigned int +mlxsw_sp_acl_bf_rule_count_index_get(struct mlxsw_sp_acl_bf *bf, + unsigned int erp_bank, + 
unsigned int bf_index) +{ + return erp_bank * bf->bank_size + bf_index; +} + +static unsigned int +mlxsw_sp_acl_bf_index_get(struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + char bf_key[MLXSW_BLOOM_KEY_LEN]; + u8 bf_size; + + mlxsw_sp_acl_bf_key_encode(aregion, aentry, bf_key, &bf_size); + return mlxsw_sp_acl_bf_crc(bf_key, bf_size); +} + +int +mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + unsigned int erp_bank, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + unsigned int rule_index; + char *peabfe_pl; + u16 bf_index; + int err; + + bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry); + rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank, + bf_index); + + if (refcount_inc_not_zero(&bf->refcnt[rule_index])) + return 0; + + peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL); + if (!peabfe_pl) + return -ENOMEM; + + mlxsw_reg_peabfe_pack(peabfe_pl); + mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 1, erp_bank, bf_index); + err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl); + kfree(peabfe_pl); + if (err) + return err; + + refcount_set(&bf->refcnt[rule_index], 1); + return 0; +} + +void +mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + unsigned int erp_bank, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + unsigned int rule_index; + char *peabfe_pl; + u16 bf_index; + + bf_index = mlxsw_sp_acl_bf_index_get(bf, aregion, aentry); + rule_index = mlxsw_sp_acl_bf_rule_count_index_get(bf, erp_bank, + bf_index); + + if (refcount_dec_and_test(&bf->refcnt[rule_index])) { + peabfe_pl = kmalloc(MLXSW_REG_PEABFE_LEN, GFP_KERNEL); + if (!peabfe_pl) + return; + + mlxsw_reg_peabfe_pack(peabfe_pl); + mlxsw_reg_peabfe_rec_pack(peabfe_pl, 0, 0, erp_bank, bf_index); + mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(peabfe), peabfe_pl); + kfree(peabfe_pl); + } +} + +struct mlxsw_sp_acl_bf * +mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks) +{ + struct mlxsw_sp_acl_bf *bf; + unsigned int bf_bank_size; + + if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_BF_LOG)) + return ERR_PTR(-EIO); + + /* Bloom filter size per erp_table_bank + * is 2^ACL_MAX_BF_LOG + */ + bf_bank_size = 1 << MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_BF_LOG); + bf = kzalloc(sizeof(*bf) + bf_bank_size * num_erp_banks * + sizeof(*bf->refcnt), GFP_KERNEL); + if (!bf) + return ERR_PTR(-ENOMEM); + + bf->bank_size = bf_bank_size; + return bf; +} + +void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf) +{ + kfree(bf); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c index e3c6fe8b1d40..b0f2d8e8ded0 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_ctcam.c @@ -46,7 +46,6 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_region *region = cregion->region; struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl); char ptce2_pl[MLXSW_REG_PTCE2_LEN]; - unsigned int blocks_count; char *act_set; u32 priority; char *mask; @@ -63,9 +62,7 @@ mlxsw_sp_acl_ctcam_region_entry_insert(struct mlxsw_sp *mlxsw_sp, centry->parman_item.index, priority); key = mlxsw_reg_ptce2_flex_key_blocks_data(ptce2_pl); mask = mlxsw_reg_ptce2_mask_data(ptce2_pl); - blocks_count = 
mlxsw_afk_key_info_blocks_count_get(region->key_info); - mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask, 0, - blocks_count - 1); + mlxsw_afk_encode(afk, region->key_info, &rulei->values, key, mask); err = cregion->ops->entry_insert(cregion, centry, mask); if (err) @@ -92,6 +89,27 @@ mlxsw_sp_acl_ctcam_region_entry_remove(struct mlxsw_sp *mlxsw_sp, cregion->ops->entry_remove(cregion, centry); } +static int +mlxsw_sp_acl_ctcam_region_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_afa_block *afa_block, + unsigned int priority) +{ + char ptce2_pl[MLXSW_REG_PTCE2_LEN]; + char *act_set; + + mlxsw_reg_ptce2_pack(ptce2_pl, true, MLXSW_REG_PTCE2_OP_WRITE_UPDATE, + cregion->region->tcam_region_info, + centry->parman_item.index, priority); + + act_set = mlxsw_afa_block_first_set(afa_block); + mlxsw_reg_ptce2_flex_action_set_memcpy_to(ptce2_pl, act_set); + + return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptce2), ptce2_pl); +} + + static int mlxsw_sp_acl_ctcam_region_parman_resize(void *priv, unsigned long new_count) { @@ -194,3 +212,15 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, parman_item_remove(cregion->parman, &cchunk->parman_prio, ¢ry->parman_item); } + +int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + return mlxsw_sp_acl_ctcam_region_entry_action_replace(mlxsw_sp, cregion, + centry, + rulei->act_block, + rulei->priority); +} diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c index 0a4fd3c8662a..1c19feefa5f2 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_erp.c @@ -7,7 +7,7 @@ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/list.h> -#include <linux/rhashtable.h> +#include <linux/objagg.h> #include <linux/rtnetlink.h> #include <linux/slab.h> @@ -24,11 +24,14 @@ struct mlxsw_sp_acl_erp_core { unsigned int erpt_entries_size[MLXSW_SP_ACL_ATCAM_REGION_TYPE_MAX + 1]; struct gen_pool *erp_tables; struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_acl_bf *bf; unsigned int num_erp_banks; }; struct mlxsw_sp_acl_erp_key { char mask[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; +#define __MASK_LEN 0x38 +#define __MASK_IDX(i) (__MASK_LEN - (i) - 1) bool ctcam; }; @@ -36,10 +39,8 @@ struct mlxsw_sp_acl_erp { struct mlxsw_sp_acl_erp_key key; u8 id; u8 index; - refcount_t refcnt; DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); struct list_head list; - struct rhash_head ht_node; struct mlxsw_sp_acl_erp_table *erp_table; }; @@ -53,7 +54,6 @@ struct mlxsw_sp_acl_erp_table { DECLARE_BITMAP(erp_id_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); DECLARE_BITMAP(erp_index_bitmap, MLXSW_SP_ACL_ERP_MAX_PER_REGION); struct list_head atcam_erps_list; - struct rhashtable erp_ht; struct mlxsw_sp_acl_erp_core *erp_core; struct mlxsw_sp_acl_atcam_region *aregion; const struct mlxsw_sp_acl_erp_table_ops *ops; @@ -61,12 +61,8 @@ struct mlxsw_sp_acl_erp_table { unsigned int num_atcam_erps; unsigned int num_max_atcam_erps; unsigned int num_ctcam_erps; -}; - -static const struct rhashtable_params mlxsw_sp_acl_erp_ht_params = { - .key_len = sizeof(struct mlxsw_sp_acl_erp_key), - .key_offset = offsetof(struct mlxsw_sp_acl_erp, key), - 
.head_offset = offsetof(struct mlxsw_sp_acl_erp, ht_node), + unsigned int num_deltas; + struct objagg *objagg; }; struct mlxsw_sp_acl_erp_table_ops { @@ -119,14 +115,17 @@ static const struct mlxsw_sp_acl_erp_table_ops erp_no_mask_ops = { .erp_destroy = mlxsw_sp_acl_erp_no_mask_destroy, }; -bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp) +static bool +mlxsw_sp_acl_erp_table_is_used(const struct mlxsw_sp_acl_erp_table *erp_table) { - return erp->key.ctcam; + return erp_table->ops != &erp_single_mask_ops && + erp_table->ops != &erp_no_mask_ops; } -u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp) +static unsigned int +mlxsw_sp_acl_erp_bank_get(const struct mlxsw_sp_acl_erp *erp) { - return erp->id; + return erp->index % erp->erp_table->erp_core->num_erp_banks; } static unsigned int @@ -194,12 +193,15 @@ mlxsw_sp_acl_erp_master_mask_update(struct mlxsw_sp_acl_erp_table *erp_table) static int mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table, - const struct mlxsw_sp_acl_erp *erp) + struct mlxsw_sp_acl_erp_key *key) { + DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); unsigned long bit; int err; - for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + bitmap_from_arr32(mask_bitmap, (u32 *) key->mask, + MLXSW_SP_ACL_TCAM_MASK_LEN); + for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) mlxsw_sp_acl_erp_master_mask_bit_set(bit, &erp_table->master_mask); @@ -210,7 +212,7 @@ mlxsw_sp_acl_erp_master_mask_set(struct mlxsw_sp_acl_erp_table *erp_table, return 0; err_master_mask_update: - for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) mlxsw_sp_acl_erp_master_mask_bit_clear(bit, &erp_table->master_mask); return err; @@ -218,12 +220,15 @@ err_master_mask_update: static int mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table, - const struct mlxsw_sp_acl_erp *erp) + struct mlxsw_sp_acl_erp_key *key) { + DECLARE_BITMAP(mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN); unsigned long bit; int err; - for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + bitmap_from_arr32(mask_bitmap, (u32 *) key->mask, + MLXSW_SP_ACL_TCAM_MASK_LEN); + for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) mlxsw_sp_acl_erp_master_mask_bit_clear(bit, &erp_table->master_mask); @@ -234,7 +239,7 @@ mlxsw_sp_acl_erp_master_mask_clear(struct mlxsw_sp_acl_erp_table *erp_table, return 0; err_master_mask_update: - for_each_set_bit(bit, erp->mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) + for_each_set_bit(bit, mask_bitmap, MLXSW_SP_ACL_TCAM_MASK_LEN) mlxsw_sp_acl_erp_master_mask_bit_set(bit, &erp_table->master_mask); return err; @@ -256,26 +261,16 @@ mlxsw_sp_acl_erp_generic_create(struct mlxsw_sp_acl_erp_table *erp_table, goto err_erp_id_get; memcpy(&erp->key, key, sizeof(*key)); - bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask, - MLXSW_SP_ACL_TCAM_MASK_LEN); list_add(&erp->list, &erp_table->atcam_erps_list); - refcount_set(&erp->refcnt, 1); erp_table->num_atcam_erps++; erp->erp_table = erp_table; - err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp); + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key); if (err) goto err_master_mask_set; - err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node, - mlxsw_sp_acl_erp_ht_params); - if (err) - goto err_rhashtable_insert; - return erp; -err_rhashtable_insert: - mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); err_master_mask_set: 
erp_table->num_atcam_erps--; list_del(&erp->list); @@ -290,9 +285,7 @@ mlxsw_sp_acl_erp_generic_destroy(struct mlxsw_sp_acl_erp *erp) { struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; - rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, - mlxsw_sp_acl_erp_ht_params); - mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); + mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key); erp_table->num_atcam_erps--; list_del(&erp->list); mlxsw_sp_acl_erp_id_put(erp_table, erp->id); @@ -525,6 +518,48 @@ err_table_relocate: } static int +mlxsw_acl_erp_table_bf_add(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion; + unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp); + struct mlxsw_sp_acl_atcam_entry *aentry; + int err; + + list_for_each_entry(aentry, &aregion->entries_list, list) { + err = mlxsw_sp_acl_bf_entry_add(aregion->region->mlxsw_sp, + erp_table->erp_core->bf, + aregion, erp_bank, aentry); + if (err) + goto bf_entry_add_err; + } + + return 0; + +bf_entry_add_err: + list_for_each_entry_continue_reverse(aentry, &aregion->entries_list, + list) + mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp, + erp_table->erp_core->bf, + aregion, erp_bank, aentry); + return err; +} + +static void +mlxsw_acl_erp_table_bf_del(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp *erp) +{ + struct mlxsw_sp_acl_atcam_region *aregion = erp_table->aregion; + unsigned int erp_bank = mlxsw_sp_acl_erp_bank_get(erp); + struct mlxsw_sp_acl_atcam_entry *aentry; + + list_for_each_entry_reverse(aentry, &aregion->entries_list, list) + mlxsw_sp_acl_bf_entry_del(aregion->region->mlxsw_sp, + erp_table->erp_core->bf, + aregion, erp_bank, aentry); +} + +static int mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table) { struct mlxsw_sp_acl_erp_core *erp_core = erp_table->erp_core; @@ -548,16 +583,24 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table) goto err_table_master_rp; } - /* Maintain the same eRP bank for the master RP, so that we - * wouldn't need to update the bloom filter + /* Make sure the master RP is using a valid index, as + * only a single eRP row is currently allocated. */ - master_rp->index = master_rp->index % erp_core->num_erp_banks; + master_rp->index = 0; __set_bit(master_rp->index, erp_table->erp_index_bitmap); err = mlxsw_sp_acl_erp_table_erp_add(erp_table, master_rp); if (err) goto err_table_master_rp_add; + /* Update Bloom filter before enabling eRP table, as rules + * on the master RP were not set to Bloom filter up to this + * point. 
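+	 * Once lookups go through the eRP table, a rule whose Bloom
+	 * filter bits are missing would be skipped by the HW (a false
+	 * negative), so the filter must be populated first.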
+ */ + err = mlxsw_acl_erp_table_bf_add(erp_table, master_rp); + if (err) + goto err_table_bf_add; + err = mlxsw_sp_acl_erp_table_enable(erp_table, false); if (err) goto err_table_enable; @@ -565,6 +608,8 @@ mlxsw_sp_acl_erp_region_table_trans(struct mlxsw_sp_acl_erp_table *erp_table) return 0; err_table_enable: + mlxsw_acl_erp_table_bf_del(erp_table, master_rp); +err_table_bf_add: mlxsw_sp_acl_erp_table_erp_del(master_rp); err_table_master_rp_add: __clear_bit(master_rp->index, erp_table->erp_index_bitmap); @@ -585,6 +630,7 @@ mlxsw_sp_acl_erp_region_master_mask_trans(struct mlxsw_sp_acl_erp_table *erp_tab master_rp = mlxsw_sp_acl_erp_table_master_rp(erp_table); if (!master_rp) return; + mlxsw_acl_erp_table_bf_del(erp_table, master_rp); mlxsw_sp_acl_erp_table_erp_del(master_rp); __clear_bit(master_rp->index, erp_table->erp_index_bitmap); mlxsw_sp_acl_erp_table_free(erp_core, erp_table->num_max_atcam_erps, @@ -647,9 +693,55 @@ mlxsw_sp_acl_erp_region_ctcam_disable(struct mlxsw_sp_acl_erp_table *erp_table) mlxsw_sp_acl_erp_table_enable(erp_table, false); } +static int +__mlxsw_sp_acl_erp_table_other_inc(struct mlxsw_sp_acl_erp_table *erp_table, + unsigned int *inc_num) +{ + int err; + + /* If there are C-TCAM eRP or deltas in use we need to transition + * the region to use eRP table, if it is not already done + */ + if (!mlxsw_sp_acl_erp_table_is_used(erp_table)) { + err = mlxsw_sp_acl_erp_region_table_trans(erp_table); + if (err) + return err; + } + + /* When C-TCAM or deltas are used, the eRP table must be used */ + if (erp_table->ops != &erp_multiple_masks_ops) + erp_table->ops = &erp_multiple_masks_ops; + + (*inc_num)++; + + return 0; +} + +static int mlxsw_sp_acl_erp_ctcam_inc(struct mlxsw_sp_acl_erp_table *erp_table) +{ + return __mlxsw_sp_acl_erp_table_other_inc(erp_table, + &erp_table->num_ctcam_erps); +} + +static int mlxsw_sp_acl_erp_delta_inc(struct mlxsw_sp_acl_erp_table *erp_table) +{ + return __mlxsw_sp_acl_erp_table_other_inc(erp_table, + &erp_table->num_deltas); +} + static void -mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table) +__mlxsw_sp_acl_erp_table_other_dec(struct mlxsw_sp_acl_erp_table *erp_table, + unsigned int *dec_num) { + (*dec_num)--; + + /* If there are no C-TCAM eRP or deltas in use, the state we + * transition to depends on the number of A-TCAM eRPs currently + * in use. 
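+	 * For example, when the last delta is released and only a
+	 * single A-TCAM eRP remains, the region can fall back to
+	 * master-RP based lookups without the eRP table.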
+ */ + if (erp_table->num_ctcam_erps > 0 || erp_table->num_deltas > 0) + return; + switch (erp_table->num_atcam_erps) { case 2: /* Keep using the eRP table, but correctly set the @@ -683,9 +775,21 @@ mlxsw_sp_acl_erp_ctcam_table_ops_set(struct mlxsw_sp_acl_erp_table *erp_table) } } +static void mlxsw_sp_acl_erp_ctcam_dec(struct mlxsw_sp_acl_erp_table *erp_table) +{ + __mlxsw_sp_acl_erp_table_other_dec(erp_table, + &erp_table->num_ctcam_erps); +} + +static void mlxsw_sp_acl_erp_delta_dec(struct mlxsw_sp_acl_erp_table *erp_table) +{ + __mlxsw_sp_acl_erp_table_other_dec(erp_table, + &erp_table->num_deltas); +} + static struct mlxsw_sp_acl_erp * -__mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, - struct mlxsw_sp_acl_erp_key *key) +mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, + struct mlxsw_sp_acl_erp_key *key) { struct mlxsw_sp_acl_erp *erp; int err; @@ -697,89 +801,41 @@ __mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, memcpy(&erp->key, key, sizeof(*key)); bitmap_from_arr32(erp->mask_bitmap, (u32 *) key->mask, MLXSW_SP_ACL_TCAM_MASK_LEN); - refcount_set(&erp->refcnt, 1); - erp_table->num_ctcam_erps++; - erp->erp_table = erp_table; - err = mlxsw_sp_acl_erp_master_mask_set(erp_table, erp); + err = mlxsw_sp_acl_erp_ctcam_inc(erp_table); if (err) - goto err_master_mask_set; + goto err_erp_ctcam_inc; + + erp->erp_table = erp_table; - err = rhashtable_insert_fast(&erp_table->erp_ht, &erp->ht_node, - mlxsw_sp_acl_erp_ht_params); + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &erp->key); if (err) - goto err_rhashtable_insert; + goto err_master_mask_set; err = mlxsw_sp_acl_erp_region_ctcam_enable(erp_table); if (err) goto err_erp_region_ctcam_enable; - /* When C-TCAM is used, the eRP table must be used */ - erp_table->ops = &erp_multiple_masks_ops; - return erp; err_erp_region_ctcam_enable: - rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, - mlxsw_sp_acl_erp_ht_params); -err_rhashtable_insert: - mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); + mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key); err_master_mask_set: - erp_table->num_ctcam_erps--; + mlxsw_sp_acl_erp_ctcam_dec(erp_table); +err_erp_ctcam_inc: kfree(erp); return ERR_PTR(err); } -static struct mlxsw_sp_acl_erp * -mlxsw_sp_acl_erp_ctcam_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, - struct mlxsw_sp_acl_erp_key *key) -{ - struct mlxsw_sp_acl_erp *erp; - int err; - - /* There is a special situation where we need to spill rules - * into the C-TCAM, yet the region is still using a master - * mask and thus not performing a lookup in the C-TCAM. This - * can happen when two rules that only differ in priority - and - * thus sharing the same key - are programmed. 
In this case - * we transition the region to use an eRP table - */ - err = mlxsw_sp_acl_erp_region_table_trans(erp_table); - if (err) - return ERR_PTR(err); - - erp = __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); - if (IS_ERR(erp)) { - err = PTR_ERR(erp); - goto err_erp_create; - } - - return erp; - -err_erp_create: - mlxsw_sp_acl_erp_region_master_mask_trans(erp_table); - return ERR_PTR(err); -} - static void mlxsw_sp_acl_erp_ctcam_mask_destroy(struct mlxsw_sp_acl_erp *erp) { struct mlxsw_sp_acl_erp_table *erp_table = erp->erp_table; mlxsw_sp_acl_erp_region_ctcam_disable(erp_table); - rhashtable_remove_fast(&erp_table->erp_ht, &erp->ht_node, - mlxsw_sp_acl_erp_ht_params); - mlxsw_sp_acl_erp_master_mask_clear(erp_table, erp); - erp_table->num_ctcam_erps--; + mlxsw_sp_acl_erp_master_mask_clear(erp_table, &erp->key); + mlxsw_sp_acl_erp_ctcam_dec(erp_table); kfree(erp); - - /* Once the last C-TCAM eRP was destroyed, the state we - * transition to depends on the number of A-TCAM eRPs currently - * in use - */ - if (erp_table->num_ctcam_erps > 0) - return; - mlxsw_sp_acl_erp_ctcam_table_ops_set(erp_table); } static struct mlxsw_sp_acl_erp * @@ -790,7 +846,7 @@ mlxsw_sp_acl_erp_mask_create(struct mlxsw_sp_acl_erp_table *erp_table, int err; if (key->ctcam) - return __mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); + return mlxsw_sp_acl_erp_ctcam_mask_create(erp_table, key); /* Expand the eRP table for the new eRP, if needed */ err = mlxsw_sp_acl_erp_table_expand(erp_table); @@ -838,7 +894,8 @@ mlxsw_sp_acl_erp_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, mlxsw_sp_acl_erp_index_put(erp_table, erp->index); mlxsw_sp_acl_erp_generic_destroy(erp); - if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0) + if (erp_table->num_atcam_erps == 2 && erp_table->num_ctcam_erps == 0 && + erp_table->num_deltas == 0) erp_table->ops = &erp_two_masks_ops; } @@ -940,13 +997,12 @@ mlxsw_sp_acl_erp_no_mask_destroy(struct mlxsw_sp_acl_erp_table *erp_table, WARN_ON(1); } -struct mlxsw_sp_acl_erp * -mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, - const char *mask, bool ctcam) +struct mlxsw_sp_acl_erp_mask * +mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion, + const char *mask, bool ctcam) { - struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; struct mlxsw_sp_acl_erp_key key; - struct mlxsw_sp_acl_erp *erp; + struct objagg_obj *objagg_obj; /* eRPs are allocated from a shared resource, but currently all * allocations are done under RTNL. 
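The rhashtable-based eRP sharing is replaced here by the generic objagg library: equal masks share one aggregation object, and a mask that differs from an existing root in at most 8 consecutive bits can be kept as a lightweight delta on top of that root. A minimal sketch of the consumer flow, using the ops wired up later in this hunk (illustrative only; use_delta() is a hypothetical placeholder and error handling is elided):

	struct objagg *objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops,
					      aregion);
	struct objagg_obj *obj = objagg_obj_get(objagg, &key);

	/* objagg_obj_delta_priv() returns NULL when 'obj' is a root */
	if (objagg_obj_delta_priv(obj))
		use_delta(objagg_obj_delta_priv(obj));
	objagg_obj_put(objagg, obj);
	objagg_destroy(objagg);

For the delta itself, two masks whose only difference is, say, bits 4..5 of one mask byte si come out of mlxsw_sp_acl_erp_delta_fill() below as delta_start = si * 8 + 4 and delta_mask = 0x3.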
@@ -955,29 +1011,276 @@ mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, memcpy(key.mask, mask, MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN); key.ctcam = ctcam; - erp = rhashtable_lookup_fast(&erp_table->erp_ht, &key, - mlxsw_sp_acl_erp_ht_params); - if (erp) { - refcount_inc(&erp->refcnt); - return erp; - } + objagg_obj = objagg_obj_get(aregion->erp_table->objagg, &key); + if (IS_ERR(objagg_obj)) + return ERR_CAST(objagg_obj); + return (struct mlxsw_sp_acl_erp_mask *) objagg_obj; +} + +void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp_mask *erp_mask) +{ + struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; - return erp_table->ops->erp_create(erp_table, &key); + ASSERT_RTNL(); + objagg_obj_put(aregion->erp_table->objagg, objagg_obj); } -void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_erp *erp) +int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp_mask *erp_mask, + struct mlxsw_sp_acl_atcam_entry *aentry) { - struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; + const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); + unsigned int erp_bank; ASSERT_RTNL(); + if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) + return 0; + + erp_bank = mlxsw_sp_acl_erp_bank_get(erp); + return mlxsw_sp_acl_bf_entry_add(mlxsw_sp, + erp->erp_table->erp_core->bf, + aregion, erp_bank, aentry); +} + +void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp_mask *erp_mask, + struct mlxsw_sp_acl_atcam_entry *aentry) +{ + struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; + const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); + unsigned int erp_bank; - if (!refcount_dec_and_test(&erp->refcnt)) + ASSERT_RTNL(); + if (!mlxsw_sp_acl_erp_table_is_used(erp->erp_table)) return; - erp_table->ops->erp_destroy(erp_table, erp); + erp_bank = mlxsw_sp_acl_erp_bank_get(erp); + mlxsw_sp_acl_bf_entry_del(mlxsw_sp, + erp->erp_table->erp_core->bf, + aregion, erp_bank, aentry); +} + +bool +mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask) +{ + struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; + const struct mlxsw_sp_acl_erp_key *key = objagg_obj_raw(objagg_obj); + + return key->ctcam; +} + +u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask) +{ + struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; + const struct mlxsw_sp_acl_erp *erp = objagg_obj_root_priv(objagg_obj); + + return erp->id; +} + +struct mlxsw_sp_acl_erp_delta { + struct mlxsw_sp_acl_erp_key key; + u16 start; + u8 mask; +}; + +u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta) +{ + return delta->start; +} + +u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta) +{ + return delta->mask; +} + +u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta, + const char *enc_key) +{ + u16 start = delta->start; + u8 mask = delta->mask; + u16 tmp; + + if (!mask) + return 0; + + tmp = (unsigned char) enc_key[__MASK_IDX(start / 8)]; + if (start / 8 + 1 < __MASK_LEN) + tmp |= (unsigned char) enc_key[__MASK_IDX(start / 8 + 1)] << 8; + tmp >>= start % 8; + tmp &= mask; + return tmp; +} + +void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta, + 
const char *enc_key) +{ + u16 start = delta->start; + u8 mask = delta->mask; + unsigned char *byte; + u16 tmp; + + tmp = mask; + tmp <<= start % 8; + tmp = ~tmp; + + byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8)]; + *byte &= tmp & 0xff; + if (start / 8 + 1 < __MASK_LEN) { + byte = (unsigned char *) &enc_key[__MASK_IDX(start / 8 + 1)]; + *byte &= (tmp >> 8) & 0xff; + } +} + +static const struct mlxsw_sp_acl_erp_delta +mlxsw_sp_acl_erp_delta_default = {}; + +const struct mlxsw_sp_acl_erp_delta * +mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask) +{ + struct objagg_obj *objagg_obj = (struct objagg_obj *) erp_mask; + const struct mlxsw_sp_acl_erp_delta *delta; + + delta = objagg_obj_delta_priv(objagg_obj); + if (!delta) + delta = &mlxsw_sp_acl_erp_delta_default; + return delta; +} + +static int +mlxsw_sp_acl_erp_delta_fill(const struct mlxsw_sp_acl_erp_key *parent_key, + const struct mlxsw_sp_acl_erp_key *key, + u16 *delta_start, u8 *delta_mask) +{ + int offset = 0; + int si = -1; + u16 pmask; + u16 mask; + int i; + + /* The difference between 2 masks can be up to 8 consecutive bits. */ + for (i = 0; i < __MASK_LEN; i++) { + if (parent_key->mask[__MASK_IDX(i)] == key->mask[__MASK_IDX(i)]) + continue; + if (si == -1) + si = i; + else if (si != i - 1) + return -EINVAL; + } + if (si == -1) { + /* The masks are the same, this cannot happen. + * That means the caller is broken. + */ + WARN_ON(1); + *delta_start = 0; + *delta_mask = 0; + return 0; + } + pmask = (unsigned char) parent_key->mask[__MASK_IDX(si)]; + mask = (unsigned char) key->mask[__MASK_IDX(si)]; + if (si + 1 < __MASK_LEN) { + pmask |= (unsigned char) parent_key->mask[__MASK_IDX(si + 1)] << 8; + mask |= (unsigned char) key->mask[__MASK_IDX(si + 1)] << 8; + } + + if ((pmask ^ mask) & pmask) + return -EINVAL; + mask &= ~pmask; + while (!(mask & (1 << offset))) + offset++; + while (!(mask & 1)) + mask >>= 1; + if (mask & 0xff00) + return -EINVAL; + + *delta_start = si * 8 + offset; + *delta_mask = mask; + + return 0; +} + +static void *mlxsw_sp_acl_erp_delta_create(void *priv, void *parent_obj, + void *obj) +{ + struct mlxsw_sp_acl_erp_key *parent_key = parent_obj; + struct mlxsw_sp_acl_atcam_region *aregion = priv; + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + struct mlxsw_sp_acl_erp_key *key = obj; + struct mlxsw_sp_acl_erp_delta *delta; + u16 delta_start; + u8 delta_mask; + int err; + + if (parent_key->ctcam || key->ctcam) + return ERR_PTR(-EINVAL); + err = mlxsw_sp_acl_erp_delta_fill(parent_key, key, + &delta_start, &delta_mask); + if (err) + return ERR_PTR(-EINVAL); + + delta = kzalloc(sizeof(*delta), GFP_KERNEL); + if (!delta) + return ERR_PTR(-ENOMEM); + delta->start = delta_start; + delta->mask = delta_mask; + + err = mlxsw_sp_acl_erp_delta_inc(erp_table); + if (err) + goto err_erp_delta_inc; + + memcpy(&delta->key, key, sizeof(*key)); + err = mlxsw_sp_acl_erp_master_mask_set(erp_table, &delta->key); + if (err) + goto err_master_mask_set; + + return delta; + +err_master_mask_set: + mlxsw_sp_acl_erp_delta_dec(erp_table); +err_erp_delta_inc: + kfree(delta); + return ERR_PTR(err); +} + +static void mlxsw_sp_acl_erp_delta_destroy(void *priv, void *delta_priv) +{ + struct mlxsw_sp_acl_erp_delta *delta = delta_priv; + struct mlxsw_sp_acl_atcam_region *aregion = priv; + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + + mlxsw_sp_acl_erp_master_mask_clear(erp_table, &delta->key); + mlxsw_sp_acl_erp_delta_dec(erp_table); + kfree(delta); +} + +static void 
*mlxsw_sp_acl_erp_root_create(void *priv, void *obj) +{ + struct mlxsw_sp_acl_atcam_region *aregion = priv; + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + struct mlxsw_sp_acl_erp_key *key = obj; + + return erp_table->ops->erp_create(erp_table, key); +} + +static void mlxsw_sp_acl_erp_root_destroy(void *priv, void *root_priv) +{ + struct mlxsw_sp_acl_atcam_region *aregion = priv; + struct mlxsw_sp_acl_erp_table *erp_table = aregion->erp_table; + + erp_table->ops->erp_destroy(erp_table, root_priv); } +static const struct objagg_ops mlxsw_sp_acl_erp_objagg_ops = { + .obj_size = sizeof(struct mlxsw_sp_acl_erp_key), + .delta_create = mlxsw_sp_acl_erp_delta_create, + .delta_destroy = mlxsw_sp_acl_erp_delta_destroy, + .root_create = mlxsw_sp_acl_erp_root_create, + .root_destroy = mlxsw_sp_acl_erp_root_destroy, +}; + static struct mlxsw_sp_acl_erp_table * mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) { @@ -988,9 +1291,12 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) if (!erp_table) return ERR_PTR(-ENOMEM); - err = rhashtable_init(&erp_table->erp_ht, &mlxsw_sp_acl_erp_ht_params); - if (err) - goto err_rhashtable_init; + erp_table->objagg = objagg_create(&mlxsw_sp_acl_erp_objagg_ops, + aregion); + if (IS_ERR(erp_table->objagg)) { + err = PTR_ERR(erp_table->objagg); + goto err_objagg_create; + } erp_table->erp_core = aregion->atcam->erp_core; erp_table->ops = &erp_no_mask_ops; @@ -999,7 +1305,7 @@ mlxsw_sp_acl_erp_table_create(struct mlxsw_sp_acl_atcam_region *aregion) return erp_table; -err_rhashtable_init: +err_objagg_create: kfree(erp_table); return ERR_PTR(err); } @@ -1008,7 +1314,7 @@ static void mlxsw_sp_acl_erp_table_destroy(struct mlxsw_sp_acl_erp_table *erp_table) { WARN_ON(!list_empty(&erp_table->atcam_erps_list)); - rhashtable_destroy(&erp_table->erp_ht); + objagg_destroy(erp_table->objagg); kfree(erp_table); } @@ -1118,6 +1424,12 @@ static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp, if (err) goto err_gen_pool_add; + erp_core->bf = mlxsw_sp_acl_bf_init(mlxsw_sp, erp_core->num_erp_banks); + if (IS_ERR(erp_core->bf)) { + err = PTR_ERR(erp_core->bf); + goto err_bf_init; + } + /* Different regions require masks of different sizes */ err = mlxsw_sp_acl_erp_tables_sizes_query(mlxsw_sp, erp_core); if (err) @@ -1126,6 +1438,8 @@ static int mlxsw_sp_acl_erp_tables_init(struct mlxsw_sp *mlxsw_sp, return 0; err_erp_tables_sizes_query: + mlxsw_sp_acl_bf_fini(erp_core->bf); +err_bf_init: err_gen_pool_add: gen_pool_destroy(erp_core->erp_tables); return err; @@ -1134,6 +1448,7 @@ err_gen_pool_add: static void mlxsw_sp_acl_erp_tables_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_erp_core *erp_core) { + mlxsw_sp_acl_bf_fini(erp_core->bf); gen_pool_destroy(erp_core->erp_tables); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c index d409b09ba8df..2a998dea4f39 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_keys.c @@ -98,8 +98,8 @@ static const struct mlxsw_afk_block mlxsw_sp1_afk_blocks[] = { #define MLXSW_SP1_AFK_KEY_BLOCK_SIZE 16 -static void mlxsw_sp1_afk_encode_block(char *block, int block_index, - char *output) +static void mlxsw_sp1_afk_encode_block(char *output, int block_index, + char *block) { unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE; char *output_indexed = output + offset; @@ -107,10 +107,19 @@ static 
void mlxsw_sp1_afk_encode_block(char *block, int block_index, memcpy(output_indexed, block, MLXSW_SP1_AFK_KEY_BLOCK_SIZE); } +static void mlxsw_sp1_afk_clear_block(char *output, int block_index) +{ + unsigned int offset = block_index * MLXSW_SP1_AFK_KEY_BLOCK_SIZE; + char *output_indexed = output + offset; + + memset(output_indexed, 0, MLXSW_SP1_AFK_KEY_BLOCK_SIZE); +} + const struct mlxsw_afk_ops mlxsw_sp1_afk_ops = { .blocks = mlxsw_sp1_afk_blocks, .blocks_count = ARRAY_SIZE(mlxsw_sp1_afk_blocks), .encode_block = mlxsw_sp1_afk_encode_block, + .clear_block = mlxsw_sp1_afk_clear_block, }; static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_mac_0[] = { @@ -158,6 +167,11 @@ static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_2[] = { MLXSW_AFK_ELEMENT_INST_U32(IP_PROTO, 0x04, 16, 8), }; +static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv4_4[] = { + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_0_7, 0x04, 24, 8), + MLXSW_AFK_ELEMENT_INST_U32(VIRT_ROUTER_8_10, 0x00, 0, 3), +}; + static struct mlxsw_afk_element_inst mlxsw_sp_afk_element_info_ipv6_0[] = { MLXSW_AFK_ELEMENT_INST_BUF(DST_IP_32_63, 0x04, 4), }; @@ -201,6 +215,7 @@ static const struct mlxsw_afk_block mlxsw_sp2_afk_blocks[] = { MLXSW_AFK_BLOCK(0x38, mlxsw_sp_afk_element_info_ipv4_0), MLXSW_AFK_BLOCK(0x39, mlxsw_sp_afk_element_info_ipv4_1), MLXSW_AFK_BLOCK(0x3A, mlxsw_sp_afk_element_info_ipv4_2), + MLXSW_AFK_BLOCK(0x3C, mlxsw_sp_afk_element_info_ipv4_4), MLXSW_AFK_BLOCK(0x40, mlxsw_sp_afk_element_info_ipv6_0), MLXSW_AFK_BLOCK(0x41, mlxsw_sp_afk_element_info_ipv6_1), MLXSW_AFK_BLOCK(0x42, mlxsw_sp_afk_element_info_ipv6_2), @@ -263,10 +278,9 @@ static const struct mlxsw_sp2_afk_block_layout mlxsw_sp2_afk_blocks_layout[] = { MLXSW_SP2_AFK_BLOCK_LAYOUT(block11, 0x00, 12), }; -static void mlxsw_sp2_afk_encode_block(char *block, int block_index, - char *output) +static void __mlxsw_sp2_afk_block_value_set(char *output, int block_index, + u64 block_value) { - u64 block_value = mlxsw_sp2_afk_block_value_get(block); const struct mlxsw_sp2_afk_block_layout *block_layout; if (WARN_ON(block_index < 0 || @@ -278,8 +292,22 @@ static void mlxsw_sp2_afk_encode_block(char *block, int block_index, &block_layout->item, 0, block_value); } +static void mlxsw_sp2_afk_encode_block(char *output, int block_index, + char *block) +{ + u64 block_value = mlxsw_sp2_afk_block_value_get(block); + + __mlxsw_sp2_afk_block_value_set(output, block_index, block_value); +} + +static void mlxsw_sp2_afk_clear_block(char *output, int block_index) +{ + __mlxsw_sp2_afk_block_value_set(output, block_index, 0); +} + const struct mlxsw_afk_ops mlxsw_sp2_afk_ops = { .blocks = mlxsw_sp2_afk_blocks, .blocks_count = ARRAY_SIZE(mlxsw_sp2_afk_blocks), .encode_block = mlxsw_sp2_afk_encode_block, + .clear_block = mlxsw_sp2_afk_clear_block, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c index e171513bb32a..fe230acf92a9 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.c @@ -95,8 +95,9 @@ int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp, if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE)) return -EIO; - max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE); - if (rulei->priority > max_priority) + /* Priority range is 1..cap_kvd_size-1. 
*/ + max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1; + if (rulei->priority >= max_priority) return -EINVAL; /* Unlike in TC, in HW, higher number means higher priority. */ @@ -779,6 +780,20 @@ static void mlxsw_sp_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp, } static int +mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam_group *group, + struct mlxsw_sp_acl_tcam_entry *entry, + struct mlxsw_sp_acl_rule_info *rulei) +{ + const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops; + struct mlxsw_sp_acl_tcam_chunk *chunk = entry->chunk; + struct mlxsw_sp_acl_tcam_region *region = chunk->region; + + return ops->entry_action_replace(mlxsw_sp, region->priv, chunk->priv, + entry->priv, rulei); +} + +static int mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam_entry *entry, bool *activity) @@ -848,6 +863,15 @@ struct mlxsw_sp_acl_tcam_flower_rule { struct mlxsw_sp_acl_tcam_entry entry; }; +struct mlxsw_sp_acl_tcam_mr_ruleset { + struct mlxsw_sp_acl_tcam_chunk *chunk; + struct mlxsw_sp_acl_tcam_group group; +}; + +struct mlxsw_sp_acl_tcam_mr_rule { + struct mlxsw_sp_acl_tcam_entry entry; +}; + static int mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_tcam *tcam, @@ -930,6 +954,15 @@ mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) } static int +mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, + void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + return -EOPNOTSUPP; +} + +static int mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp, void *rule_priv, bool *activity) { @@ -949,12 +982,146 @@ static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = { .rule_priv_size = mlxsw_sp_acl_tcam_flower_rule_priv_size, .rule_add = mlxsw_sp_acl_tcam_flower_rule_add, .rule_del = mlxsw_sp_acl_tcam_flower_rule_del, + .rule_action_replace = mlxsw_sp_acl_tcam_flower_rule_action_replace, .rule_activity_get = mlxsw_sp_acl_tcam_flower_rule_activity_get, }; +static int +mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_tcam *tcam, + void *ruleset_priv, + struct mlxsw_afk_element_usage *tmplt_elusage) +{ + struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; + int err; + + err = mlxsw_sp_acl_tcam_group_add(mlxsw_sp, tcam, &ruleset->group, + mlxsw_sp_acl_tcam_patterns, + MLXSW_SP_ACL_TCAM_PATTERNS_COUNT, + tmplt_elusage); + if (err) + return err; + + /* For most of the TCAM clients it would make sense to take a tcam chunk + * only when the first rule is written. This is not the case for + * multicast router as it is required to bind the multicast router to a + * specific ACL Group ID which must exist in HW before multicast router + * is initialized. 
+ */ + ruleset->chunk = mlxsw_sp_acl_tcam_chunk_get(mlxsw_sp, &ruleset->group, + 1, tmplt_elusage); + if (IS_ERR(ruleset->chunk)) { + err = PTR_ERR(ruleset->chunk); + goto err_chunk_get; + } + + return 0; + +err_chunk_get: + mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); + return err; +} + +static void +mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; + + mlxsw_sp_acl_tcam_chunk_put(mlxsw_sp, ruleset->chunk); + mlxsw_sp_acl_tcam_group_del(mlxsw_sp, &ruleset->group); +} + +static int +mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) +{ + /* Binding is done when initializing multicast router */ + return 0; +} + +static void +mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, + struct mlxsw_sp_port *mlxsw_sp_port, + bool ingress) +{ +} + +static u16 +mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv) +{ + struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; + + return mlxsw_sp_acl_tcam_group_id(&ruleset->group); +} + +static size_t mlxsw_sp_acl_tcam_mr_rule_priv_size(struct mlxsw_sp *mlxsw_sp) +{ + return sizeof(struct mlxsw_sp_acl_tcam_mr_rule) + + mlxsw_sp_acl_tcam_entry_priv_size(mlxsw_sp); +} + +static int +mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv, + void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_add(mlxsw_sp, &ruleset->group, + &rule->entry, rulei); +} + +static void +mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv) +{ + struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; + + mlxsw_sp_acl_tcam_entry_del(mlxsw_sp, &rule->entry); +} + +static int +mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei) +{ + struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv; + struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp, &ruleset->group, + &rule->entry, rulei); +} + +static int +mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp, + void *rule_priv, bool *activity) +{ + struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv; + + return mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, &rule->entry, + activity); +} + +static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = { + .ruleset_priv_size = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset), + .ruleset_add = mlxsw_sp_acl_tcam_mr_ruleset_add, + .ruleset_del = mlxsw_sp_acl_tcam_mr_ruleset_del, + .ruleset_bind = mlxsw_sp_acl_tcam_mr_ruleset_bind, + .ruleset_unbind = mlxsw_sp_acl_tcam_mr_ruleset_unbind, + .ruleset_group_id = mlxsw_sp_acl_tcam_mr_ruleset_group_id, + .rule_priv_size = mlxsw_sp_acl_tcam_mr_rule_priv_size, + .rule_add = mlxsw_sp_acl_tcam_mr_rule_add, + .rule_del = mlxsw_sp_acl_tcam_mr_rule_del, + .rule_action_replace = mlxsw_sp_acl_tcam_mr_rule_action_replace, + .rule_activity_get = mlxsw_sp_acl_tcam_mr_rule_activity_get, +}; + static const struct mlxsw_sp_acl_profile_ops * mlxsw_sp_acl_tcam_profile_ops_arr[] = { [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops, + [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops, }; const struct mlxsw_sp_acl_profile_ops * diff --git 
a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h index 219a4e26c332..0f1a9dee63de 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_tcam.h @@ -48,6 +48,9 @@ struct mlxsw_sp_acl_profile_ops { void *ruleset_priv, void *rule_priv, struct mlxsw_sp_acl_rule_info *rulei); void (*rule_del)(struct mlxsw_sp *mlxsw_sp, void *rule_priv); + int (*rule_action_replace)(struct mlxsw_sp *mlxsw_sp, + void *ruleset_priv, void *rule_priv, + struct mlxsw_sp_acl_rule_info *rulei); int (*rule_activity_get)(struct mlxsw_sp *mlxsw_sp, void *rule_priv, bool *activity); }; @@ -121,6 +124,11 @@ void mlxsw_sp_acl_ctcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_ctcam_region *cregion, struct mlxsw_sp_acl_ctcam_chunk *cchunk, struct mlxsw_sp_acl_ctcam_entry *centry); +int mlxsw_sp_acl_ctcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_ctcam_region *cregion, + struct mlxsw_sp_acl_ctcam_chunk *cchunk, + struct mlxsw_sp_acl_ctcam_entry *centry, + struct mlxsw_sp_acl_rule_info *rulei); static inline unsigned int mlxsw_sp_acl_ctcam_entry_offset(struct mlxsw_sp_acl_ctcam_entry *centry) { @@ -144,6 +152,7 @@ struct mlxsw_sp_acl_atcam { struct mlxsw_sp_acl_atcam_region { struct rhashtable entries_ht; /* A-TCAM only */ + struct list_head entries_list; /* A-TCAM only */ struct mlxsw_sp_acl_ctcam_region cregion; const struct mlxsw_sp_acl_atcam_region_ops *ops; struct mlxsw_sp_acl_tcam_region *region; @@ -154,7 +163,9 @@ struct mlxsw_sp_acl_atcam_region { }; struct mlxsw_sp_acl_atcam_entry_ht_key { - char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */ + char enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key, + * minus delta bits. 
+ */ u8 erp_id; }; @@ -164,10 +175,17 @@ struct mlxsw_sp_acl_atcam_chunk { struct mlxsw_sp_acl_atcam_entry { struct rhash_head ht_node; + struct list_head list; /* Member in entries_list */ struct mlxsw_sp_acl_atcam_entry_ht_key ht_key; + char full_enc_key[MLXSW_REG_PTCEX_FLEX_KEY_BLOCKS_LEN]; /* Encoded key */ + struct { + u16 start; + u8 mask; + u8 value; + } delta_info; struct mlxsw_sp_acl_ctcam_entry centry; struct mlxsw_sp_acl_atcam_lkey_id *lkey_id; - struct mlxsw_sp_acl_erp *erp; + struct mlxsw_sp_acl_erp_mask *erp_mask; }; static inline struct mlxsw_sp_acl_atcam_region * @@ -204,20 +222,45 @@ void mlxsw_sp_acl_atcam_entry_del(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam_region *aregion, struct mlxsw_sp_acl_atcam_chunk *achunk, struct mlxsw_sp_acl_atcam_entry *aentry); +int mlxsw_sp_acl_atcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_atcam_chunk *achunk, + struct mlxsw_sp_acl_atcam_entry *aentry, + struct mlxsw_sp_acl_rule_info *rulei); int mlxsw_sp_acl_atcam_init(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); void mlxsw_sp_acl_atcam_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); -struct mlxsw_sp_acl_erp; +struct mlxsw_sp_acl_erp_delta; -bool mlxsw_sp_acl_erp_is_ctcam_erp(const struct mlxsw_sp_acl_erp *erp); -u8 mlxsw_sp_acl_erp_id(const struct mlxsw_sp_acl_erp *erp); -struct mlxsw_sp_acl_erp * -mlxsw_sp_acl_erp_get(struct mlxsw_sp_acl_atcam_region *aregion, - const char *mask, bool ctcam); -void mlxsw_sp_acl_erp_put(struct mlxsw_sp_acl_atcam_region *aregion, - struct mlxsw_sp_acl_erp *erp); +u16 mlxsw_sp_acl_erp_delta_start(const struct mlxsw_sp_acl_erp_delta *delta); +u8 mlxsw_sp_acl_erp_delta_mask(const struct mlxsw_sp_acl_erp_delta *delta); +u8 mlxsw_sp_acl_erp_delta_value(const struct mlxsw_sp_acl_erp_delta *delta, + const char *enc_key); +void mlxsw_sp_acl_erp_delta_clear(const struct mlxsw_sp_acl_erp_delta *delta, + const char *enc_key); + +struct mlxsw_sp_acl_erp_mask; + +bool +mlxsw_sp_acl_erp_mask_is_ctcam(const struct mlxsw_sp_acl_erp_mask *erp_mask); +u8 mlxsw_sp_acl_erp_mask_erp_id(const struct mlxsw_sp_acl_erp_mask *erp_mask); +const struct mlxsw_sp_acl_erp_delta * +mlxsw_sp_acl_erp_delta(const struct mlxsw_sp_acl_erp_mask *erp_mask); +struct mlxsw_sp_acl_erp_mask * +mlxsw_sp_acl_erp_mask_get(struct mlxsw_sp_acl_atcam_region *aregion, + const char *mask, bool ctcam); +void mlxsw_sp_acl_erp_mask_put(struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp_mask *erp_mask); +int mlxsw_sp_acl_erp_bf_insert(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp_mask *erp_mask, + struct mlxsw_sp_acl_atcam_entry *aentry); +void mlxsw_sp_acl_erp_bf_remove(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_atcam_region *aregion, + struct mlxsw_sp_acl_erp_mask *erp_mask, + struct mlxsw_sp_acl_atcam_entry *aentry); int mlxsw_sp_acl_erp_region_init(struct mlxsw_sp_acl_atcam_region *aregion); void mlxsw_sp_acl_erp_region_fini(struct mlxsw_sp_acl_atcam_region *aregion); int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, @@ -225,4 +268,22 @@ int mlxsw_sp_acl_erps_init(struct mlxsw_sp *mlxsw_sp, void mlxsw_sp_acl_erps_fini(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_acl_atcam *atcam); +struct mlxsw_sp_acl_bf; + +int +mlxsw_sp_acl_bf_entry_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + unsigned int erp_bank, + struct mlxsw_sp_acl_atcam_entry *aentry); +void 
+mlxsw_sp_acl_bf_entry_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_acl_bf *bf, + struct mlxsw_sp_acl_atcam_region *aregion, + unsigned int erp_bank, + struct mlxsw_sp_acl_atcam_entry *aentry); +struct mlxsw_sp_acl_bf * +mlxsw_sp_acl_bf_init(struct mlxsw_sp *mlxsw_sp, unsigned int num_erp_banks); +void mlxsw_sp_acl_bf_fini(struct mlxsw_sp_acl_bf *bf); + #endif diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c index a3db033d7399..055cc6943b34 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_fid.c @@ -15,6 +15,7 @@ struct mlxsw_sp_fid_family; struct mlxsw_sp_fid_core { + struct rhashtable fid_ht; struct rhashtable vni_ht; struct mlxsw_sp_fid_family *fid_family_arr[MLXSW_SP_FID_TYPE_MAX]; unsigned int *port_fid_mappings; @@ -26,10 +27,13 @@ struct mlxsw_sp_fid { unsigned int ref_count; u16 fid_index; struct mlxsw_sp_fid_family *fid_family; + struct rhash_head ht_node; struct rhash_head vni_ht_node; + enum mlxsw_sp_nve_type nve_type; __be32 vni; u32 nve_flood_index; + int nve_ifindex; u8 vni_valid:1, nve_flood_index_valid:1; }; @@ -44,6 +48,12 @@ struct mlxsw_sp_fid_8021d { int br_ifindex; }; +static const struct rhashtable_params mlxsw_sp_fid_ht_params = { + .key_len = sizeof_field(struct mlxsw_sp_fid, fid_index), + .key_offset = offsetof(struct mlxsw_sp_fid, fid_index), + .head_offset = offsetof(struct mlxsw_sp_fid, ht_node), +}; + static const struct rhashtable_params mlxsw_sp_fid_vni_ht_params = { .key_len = sizeof_field(struct mlxsw_sp_fid, vni), .key_offset = offsetof(struct mlxsw_sp_fid, vni), @@ -75,6 +85,8 @@ struct mlxsw_sp_fid_ops { int (*nve_flood_index_set)(struct mlxsw_sp_fid *fid, u32 nve_flood_index); void (*nve_flood_index_clear)(struct mlxsw_sp_fid *fid); + void (*fdb_clear_offload)(const struct mlxsw_sp_fid *fid, + const struct net_device *nve_dev); }; struct mlxsw_sp_fid_family { @@ -89,6 +101,7 @@ struct mlxsw_sp_fid_family { enum mlxsw_sp_rif_type rif_type; const struct mlxsw_sp_fid_ops *ops; struct mlxsw_sp *mlxsw_sp; + u8 lag_vid_valid:1; }; static const int mlxsw_sp_sfgc_uc_packet_types[MLXSW_REG_SFGC_TYPE_MAX] = { @@ -113,6 +126,45 @@ static const int *mlxsw_sp_packet_type_sfgc_types[] = { [MLXSW_SP_FLOOD_TYPE_MC] = mlxsw_sp_sfgc_mc_packet_types, }; +bool mlxsw_sp_fid_lag_vid_valid(const struct mlxsw_sp_fid *fid) +{ + return fid->fid_family->lag_vid_valid; +} + +struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_index(struct mlxsw_sp *mlxsw_sp, + u16 fid_index) +{ + struct mlxsw_sp_fid *fid; + + fid = rhashtable_lookup_fast(&mlxsw_sp->fid_core->fid_ht, &fid_index, + mlxsw_sp_fid_ht_params); + if (fid) + fid->ref_count++; + + return fid; +} + +int mlxsw_sp_fid_nve_ifindex(const struct mlxsw_sp_fid *fid, int *nve_ifindex) +{ + if (!fid->vni_valid) + return -EINVAL; + + *nve_ifindex = fid->nve_ifindex; + + return 0; +} + +int mlxsw_sp_fid_nve_type(const struct mlxsw_sp_fid *fid, + enum mlxsw_sp_nve_type *p_type) +{ + if (!fid->vni_valid) + return -EINVAL; + + *p_type = fid->nve_type; + + return 0; +} + struct mlxsw_sp_fid *mlxsw_sp_fid_lookup_by_vni(struct mlxsw_sp *mlxsw_sp, __be32 vni) { @@ -173,7 +225,8 @@ bool mlxsw_sp_fid_nve_flood_index_is_set(const struct mlxsw_sp_fid *fid) return fid->nve_flood_index_valid; } -int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni) +int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, enum mlxsw_sp_nve_type type, + __be32 vni, int nve_ifindex) { struct mlxsw_sp_fid_family *fid_family = 
fid->fid_family; const struct mlxsw_sp_fid_ops *ops = fid_family->ops; @@ -183,6 +236,8 @@ int mlxsw_sp_fid_vni_set(struct mlxsw_sp_fid *fid, __be32 vni) if (WARN_ON(!ops->vni_set || fid->vni_valid)) return -EINVAL; + fid->nve_type = type; + fid->nve_ifindex = nve_ifindex; fid->vni = vni; err = rhashtable_lookup_insert_fast(&mlxsw_sp->fid_core->vni_ht, &fid->vni_ht_node, @@ -224,6 +279,16 @@ bool mlxsw_sp_fid_vni_is_set(const struct mlxsw_sp_fid *fid) return fid->vni_valid; } +void mlxsw_sp_fid_fdb_clear_offload(const struct mlxsw_sp_fid *fid, + const struct net_device *nve_dev) +{ + struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + const struct mlxsw_sp_fid_ops *ops = fid_family->ops; + + if (ops->fdb_clear_offload) + ops->fdb_clear_offload(fid, nve_dev); +} + static const struct mlxsw_sp_flood_table * mlxsw_sp_fid_flood_table_lookup(const struct mlxsw_sp_fid *fid, enum mlxsw_sp_flood_type packet_type) @@ -284,11 +349,6 @@ void mlxsw_sp_fid_port_vid_unmap(struct mlxsw_sp_fid *fid, fid->fid_family->ops->port_vid_unmap(fid, mlxsw_sp_port, vid); } -enum mlxsw_sp_rif_type mlxsw_sp_fid_rif_type(const struct mlxsw_sp_fid *fid) -{ - return fid->fid_family->rif_type; -} - u16 mlxsw_sp_fid_index(const struct mlxsw_sp_fid *fid) { return fid->fid_index; @@ -304,6 +364,11 @@ void mlxsw_sp_fid_rif_set(struct mlxsw_sp_fid *fid, struct mlxsw_sp_rif *rif) fid->rif = rif; } +struct mlxsw_sp_rif *mlxsw_sp_fid_rif(const struct mlxsw_sp_fid *fid) +{ + return fid->rif; +} + enum mlxsw_sp_rif_type mlxsw_sp_fid_type_rif_type(const struct mlxsw_sp *mlxsw_sp, enum mlxsw_sp_fid_type type) @@ -568,7 +633,7 @@ mlxsw_sp_fid_8021d_compare(const struct mlxsw_sp_fid *fid, const void *arg) static u16 mlxsw_sp_fid_8021d_flood_index(const struct mlxsw_sp_fid *fid) { - return fid->fid_index - fid->fid_family->start_index; + return fid->fid_index - VLAN_N_VID; } static int mlxsw_sp_port_vp_mode_trans(struct mlxsw_sp_port *mlxsw_sp_port) @@ -713,6 +778,13 @@ static void mlxsw_sp_fid_8021d_nve_flood_index_clear(struct mlxsw_sp_fid *fid) fid->vni_valid, 0, false); } +static void +mlxsw_sp_fid_8021d_fdb_clear_offload(const struct mlxsw_sp_fid *fid, + const struct net_device *nve_dev) +{ + br_fdb_clear_offload(nve_dev, 0); +} + static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = { .setup = mlxsw_sp_fid_8021d_setup, .configure = mlxsw_sp_fid_8021d_configure, @@ -726,6 +798,7 @@ static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021d_ops = { .vni_clear = mlxsw_sp_fid_8021d_vni_clear, .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set, .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear, + .fdb_clear_offload = mlxsw_sp_fid_8021d_fdb_clear_offload, }; static const struct mlxsw_sp_flood_table mlxsw_sp_fid_8021d_flood_tables[] = { @@ -759,6 +832,48 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021d_family = { .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables), .rif_type = MLXSW_SP_RIF_TYPE_FID, .ops = &mlxsw_sp_fid_8021d_ops, + .lag_vid_valid = 1, +}; + +static void +mlxsw_sp_fid_8021q_fdb_clear_offload(const struct mlxsw_sp_fid *fid, + const struct net_device *nve_dev) +{ + br_fdb_clear_offload(nve_dev, mlxsw_sp_fid_8021q_vid(fid)); +} + +static const struct mlxsw_sp_fid_ops mlxsw_sp_fid_8021q_emu_ops = { + .setup = mlxsw_sp_fid_8021q_setup, + .configure = mlxsw_sp_fid_8021d_configure, + .deconfigure = mlxsw_sp_fid_8021d_deconfigure, + .index_alloc = mlxsw_sp_fid_8021d_index_alloc, + .compare = mlxsw_sp_fid_8021q_compare, + .flood_index = 
mlxsw_sp_fid_8021d_flood_index, + .port_vid_map = mlxsw_sp_fid_8021d_port_vid_map, + .port_vid_unmap = mlxsw_sp_fid_8021d_port_vid_unmap, + .vni_set = mlxsw_sp_fid_8021d_vni_set, + .vni_clear = mlxsw_sp_fid_8021d_vni_clear, + .nve_flood_index_set = mlxsw_sp_fid_8021d_nve_flood_index_set, + .nve_flood_index_clear = mlxsw_sp_fid_8021d_nve_flood_index_clear, + .fdb_clear_offload = mlxsw_sp_fid_8021q_fdb_clear_offload, +}; + +/* There are 4K-2 emulated 802.1Q FIDs, starting right after the 802.1D FIDs */ +#define MLXSW_SP_FID_8021Q_EMU_START (VLAN_N_VID + MLXSW_SP_FID_8021D_MAX) +#define MLXSW_SP_FID_8021Q_EMU_END (MLXSW_SP_FID_8021Q_EMU_START + \ + VLAN_VID_MASK - 2) + +/* Range and flood configuration must match mlxsw_config_profile */ +static const struct mlxsw_sp_fid_family mlxsw_sp_fid_8021q_emu_family = { + .type = MLXSW_SP_FID_TYPE_8021Q, + .fid_size = sizeof(struct mlxsw_sp_fid_8021q), + .start_index = MLXSW_SP_FID_8021Q_EMU_START, + .end_index = MLXSW_SP_FID_8021Q_EMU_END, + .flood_tables = mlxsw_sp_fid_8021d_flood_tables, + .nr_flood_tables = ARRAY_SIZE(mlxsw_sp_fid_8021d_flood_tables), + .rif_type = MLXSW_SP_RIF_TYPE_VLAN, + .ops = &mlxsw_sp_fid_8021q_emu_ops, + .lag_vid_valid = 1, }; static int mlxsw_sp_fid_rfid_configure(struct mlxsw_sp_fid *fid) @@ -888,7 +1003,7 @@ static const struct mlxsw_sp_fid_family mlxsw_sp_fid_dummy_family = { }; static const struct mlxsw_sp_fid_family *mlxsw_sp_fid_family_arr[] = { - [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_family, + [MLXSW_SP_FID_TYPE_8021Q] = &mlxsw_sp_fid_8021q_emu_family, [MLXSW_SP_FID_TYPE_8021D] = &mlxsw_sp_fid_8021d_family, [MLXSW_SP_FID_TYPE_RFID] = &mlxsw_sp_fid_rfid_family, [MLXSW_SP_FID_TYPE_DUMMY] = &mlxsw_sp_fid_dummy_family, @@ -944,10 +1059,17 @@ static struct mlxsw_sp_fid *mlxsw_sp_fid_get(struct mlxsw_sp *mlxsw_sp, if (err) goto err_configure; + err = rhashtable_insert_fast(&mlxsw_sp->fid_core->fid_ht, &fid->ht_node, + mlxsw_sp_fid_ht_params); + if (err) + goto err_rhashtable_insert; + list_add(&fid->list, &fid_family->fids_list); fid->ref_count++; return fid; +err_rhashtable_insert: + fid->fid_family->ops->deconfigure(fid); err_configure: __clear_bit(fid_index - fid_family->start_index, fid_family->fids_bitmap); @@ -959,19 +1081,18 @@ err_index_alloc: void mlxsw_sp_fid_put(struct mlxsw_sp_fid *fid) { struct mlxsw_sp_fid_family *fid_family = fid->fid_family; + struct mlxsw_sp *mlxsw_sp = fid_family->mlxsw_sp; - if (--fid->ref_count == 1 && fid->rif) { - /* Destroy the associated RIF and let it drop the last - * reference on the FID. 
- */ - return mlxsw_sp_rif_destroy(fid->rif); - } else if (fid->ref_count == 0) { - list_del(&fid->list); - fid->fid_family->ops->deconfigure(fid); - __clear_bit(fid->fid_index - fid_family->start_index, - fid_family->fids_bitmap); - kfree(fid); - } + if (--fid->ref_count != 0) + return; + + list_del(&fid->list); + rhashtable_remove_fast(&mlxsw_sp->fid_core->fid_ht, + &fid->ht_node, mlxsw_sp_fid_ht_params); + fid->fid_family->ops->deconfigure(fid); + __clear_bit(fid->fid_index - fid_family->start_index, + fid_family->fids_bitmap); + kfree(fid); } struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_get(struct mlxsw_sp *mlxsw_sp, u16 vid) @@ -985,6 +1106,12 @@ struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_get(struct mlxsw_sp *mlxsw_sp, return mlxsw_sp_fid_get(mlxsw_sp, MLXSW_SP_FID_TYPE_8021D, &br_ifindex); } +struct mlxsw_sp_fid *mlxsw_sp_fid_8021q_lookup(struct mlxsw_sp *mlxsw_sp, + u16 vid) +{ + return mlxsw_sp_fid_lookup(mlxsw_sp, MLXSW_SP_FID_TYPE_8021Q, &vid); +} + struct mlxsw_sp_fid *mlxsw_sp_fid_8021d_lookup(struct mlxsw_sp *mlxsw_sp, int br_ifindex) { @@ -1126,9 +1253,13 @@ int mlxsw_sp_fids_init(struct mlxsw_sp *mlxsw_sp) return -ENOMEM; mlxsw_sp->fid_core = fid_core; + err = rhashtable_init(&fid_core->fid_ht, &mlxsw_sp_fid_ht_params); + if (err) + goto err_rhashtable_fid_init; + err = rhashtable_init(&fid_core->vni_ht, &mlxsw_sp_fid_vni_ht_params); if (err) - goto err_rhashtable_init; + goto err_rhashtable_vni_init; fid_core->port_fid_mappings = kcalloc(max_ports, sizeof(unsigned int), GFP_KERNEL); @@ -1157,7 +1288,9 @@ err_fid_ops_register: kfree(fid_core->port_fid_mappings); err_alloc_port_fid_mappings: rhashtable_destroy(&fid_core->vni_ht); -err_rhashtable_init: +err_rhashtable_vni_init: + rhashtable_destroy(&fid_core->fid_ht); +err_rhashtable_fid_init: kfree(fid_core); return err; } @@ -1172,5 +1305,6 @@ void mlxsw_sp_fids_fini(struct mlxsw_sp *mlxsw_sp) fid_core->fid_family_arr[i]); kfree(fid_core->port_fid_mappings); rhashtable_destroy(&fid_core->vni_ht); + rhashtable_destroy(&fid_core->fid_ht); kfree(fid_core); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c index 8d211972c5e9..ff072358d950 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_flower.c @@ -406,7 +406,7 @@ int mlxsw_sp_flower_replace(struct mlxsw_sp *mlxsw_sp, if (IS_ERR(ruleset)) return PTR_ERR(ruleset); - rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, + rule = mlxsw_sp_acl_rule_create(mlxsw_sp, ruleset, f->cookie, NULL, f->common.extack); if (IS_ERR(rule)) { err = PTR_ERR(rule); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c index b5b54b41349a..0a31fff2516e 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.c @@ -174,6 +174,20 @@ mlxsw_sp_nve_mc_record_ops_arr[] = { [MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops, }; +int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + addr->addr4 = cpu_to_be32(uip); + return 0; + default: + WARN_ON(1); + return -EINVAL; + } +} + static struct mlxsw_sp_nve_mc_list * mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp, const struct mlxsw_sp_nve_mc_list_key *key) @@ -775,6 +789,21 @@ static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp, 
mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl); } +static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fid *fid, + const struct net_device *nve_dev, + __be32 vni) +{ + const struct mlxsw_sp_nve_ops *ops; + enum mlxsw_sp_nve_type type; + + if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type))) + return; + + ops = mlxsw_sp->nve->nve_ops_arr[type]; + ops->fdb_clear_offload(nve_dev, vni); +} + int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, struct mlxsw_sp_nve_params *params, struct netlink_ext_ack *extack) @@ -803,7 +832,8 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, return err; } - err = mlxsw_sp_fid_vni_set(fid, params->vni); + err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni, + params->dev->ifindex); if (err) { NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID"); goto err_fid_vni_set; @@ -811,8 +841,16 @@ int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid, nve->config = config; + err = ops->fdb_replay(params->dev, params->vni); + if (err) { + NL_SET_ERR_MSG_MOD(extack, "Failed to offload the FDB"); + goto err_fdb_replay; + } + return 0; +err_fdb_replay: + mlxsw_sp_fid_vni_clear(fid); err_fid_vni_set: mlxsw_sp_nve_tunnel_fini(mlxsw_sp); return err; @@ -822,9 +860,27 @@ void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid) { u16 fid_index = mlxsw_sp_fid_index(fid); + struct net_device *nve_dev; + int nve_ifindex; + __be32 vni; mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid); mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index); + + if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) || + mlxsw_sp_fid_vni(fid, &vni))) + goto out; + + nve_dev = dev_get_by_index(&init_net, nve_ifindex); + if (!nve_dev) + goto out; + + mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni); + mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev); + + dev_put(nve_dev); + +out: mlxsw_sp_fid_vni_clear(fid); mlxsw_sp_nve_tunnel_fini(mlxsw_sp); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h index 4cc3297e13d6..02937ea95bc3 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve.h @@ -41,6 +41,8 @@ struct mlxsw_sp_nve_ops { int (*init)(struct mlxsw_sp_nve *nve, const struct mlxsw_sp_nve_config *config); void (*fini)(struct mlxsw_sp_nve *nve); + int (*fdb_replay)(const struct net_device *nve_dev, __be32 vni); + void (*fdb_clear_offload)(const struct net_device *nve_dev, __be32 vni); }; extern const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c index d21c7be5b1c9..74e564c4ac19 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_nve_vxlan.c @@ -17,7 +17,8 @@ #define MLXSW_SP_NVE_VXLAN_PARSING_DEPTH 128 #define MLXSW_SP_NVE_DEFAULT_PARSING_DEPTH 96 -#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS VXLAN_F_UDP_ZERO_CSUM_TX +#define MLXSW_SP_NVE_VXLAN_SUPPORTED_FLAGS (VXLAN_F_UDP_ZERO_CSUM_TX | \ + VXLAN_F_LEARN) static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, const struct net_device *dev, @@ -61,11 +62,6 @@ static bool mlxsw_sp1_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, return false; } - if (cfg->flags & VXLAN_F_LEARN) { - NL_SET_ERR_MSG_MOD(extack, "VxLAN: Learning is not 
supported"); - return false; - } - if (!(cfg->flags & VXLAN_F_UDP_ZERO_CSUM_TX)) { NL_SET_ERR_MSG_MOD(extack, "VxLAN: UDP checksum is not supported"); return false; @@ -215,12 +211,30 @@ static void mlxsw_sp1_nve_vxlan_fini(struct mlxsw_sp_nve *nve) config->udp_dport); } +static int +mlxsw_sp_nve_vxlan_fdb_replay(const struct net_device *nve_dev, __be32 vni) +{ + if (WARN_ON(!netif_is_vxlan(nve_dev))) + return -EINVAL; + return vxlan_fdb_replay(nve_dev, vni, &mlxsw_sp_switchdev_notifier); +} + +static void +mlxsw_sp_nve_vxlan_clear_offload(const struct net_device *nve_dev, __be32 vni) +{ + if (WARN_ON(!netif_is_vxlan(nve_dev))) + return; + vxlan_fdb_clear_offload(nve_dev, vni); +} + const struct mlxsw_sp_nve_ops mlxsw_sp1_nve_vxlan_ops = { .type = MLXSW_SP_NVE_TYPE_VXLAN, .can_offload = mlxsw_sp1_nve_vxlan_can_offload, .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp1_nve_vxlan_init, .fini = mlxsw_sp1_nve_vxlan_fini, + .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay, + .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; static bool mlxsw_sp2_nve_vxlan_can_offload(const struct mlxsw_sp_nve *nve, @@ -246,4 +260,6 @@ const struct mlxsw_sp_nve_ops mlxsw_sp2_nve_vxlan_ops = { .nve_config = mlxsw_sp_nve_vxlan_config, .init = mlxsw_sp2_nve_vxlan_init, .fini = mlxsw_sp2_nve_vxlan_fini, + .fdb_replay = mlxsw_sp_nve_vxlan_fdb_replay, + .fdb_clear_offload = mlxsw_sp_nve_vxlan_clear_offload, }; diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 6ebf99cc3154..98e5ffd71b91 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -15,6 +15,7 @@ #include <linux/gcd.h> #include <linux/random.h> #include <linux/if_macvlan.h> +#include <linux/refcount.h> #include <net/netevent.h> #include <net/neighbour.h> #include <net/arp.h> @@ -70,6 +71,8 @@ struct mlxsw_sp_router { bool aborted; struct notifier_block fib_nb; struct notifier_block netevent_nb; + struct notifier_block inetaddr_nb; + struct notifier_block inet6addr_nb; const struct mlxsw_sp_rif_ops **rif_ops_arr; const struct mlxsw_sp_ipip_ops **ipip_ops_arr; }; @@ -104,6 +107,7 @@ struct mlxsw_sp_rif_params { struct mlxsw_sp_rif_subport { struct mlxsw_sp_rif common; + refcount_t ref_count; union { u16 system_port; u16 lag_id; @@ -136,6 +140,7 @@ struct mlxsw_sp_rif_ops { void (*fdb_del)(struct mlxsw_sp_rif *rif, const char *mac); }; +static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif); static void mlxsw_sp_lpm_tree_hold(struct mlxsw_sp_lpm_tree *lpm_tree); static void mlxsw_sp_lpm_tree_put(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_lpm_tree *lpm_tree); @@ -6297,6 +6302,7 @@ mlxsw_sp_rif_create(struct mlxsw_sp *mlxsw_sp, err = -ENOMEM; goto err_rif_alloc; } + dev_hold(rif->dev); rif->mlxsw_sp = mlxsw_sp; rif->ops = ops; @@ -6335,6 +6341,7 @@ err_configure: if (fid) mlxsw_sp_fid_put(fid); err_fid_get: + dev_put(rif->dev); kfree(rif); err_rif_alloc: err_rif_index_alloc: @@ -6343,7 +6350,7 @@ err_rif_index_alloc: return ERR_PTR(err); } -void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) +static void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) { const struct mlxsw_sp_rif_ops *ops = rif->ops; struct mlxsw_sp *mlxsw_sp = rif->mlxsw_sp; @@ -6362,6 +6369,7 @@ void mlxsw_sp_rif_destroy(struct mlxsw_sp_rif *rif) if (fid) /* Loopback RIFs are not associated with a FID. 
*/ mlxsw_sp_fid_put(fid); + dev_put(rif->dev); kfree(rif); vr->rif_count--; mlxsw_sp_vr_put(mlxsw_sp, vr); @@ -6392,6 +6400,40 @@ mlxsw_sp_rif_subport_params_init(struct mlxsw_sp_rif_params *params, params->system_port = mlxsw_sp_port->local_port; } +static struct mlxsw_sp_rif_subport * +mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) +{ + return container_of(rif, struct mlxsw_sp_rif_subport, common); +} + +static struct mlxsw_sp_rif * +mlxsw_sp_rif_subport_get(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_rif_params *params, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif_subport *rif_subport; + struct mlxsw_sp_rif *rif; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, params->dev); + if (!rif) + return mlxsw_sp_rif_create(mlxsw_sp, params, extack); + + rif_subport = mlxsw_sp_rif_subport_rif(rif); + refcount_inc(&rif_subport->ref_count); + return rif; +} + +static void mlxsw_sp_rif_subport_put(struct mlxsw_sp_rif *rif) +{ + struct mlxsw_sp_rif_subport *rif_subport; + + rif_subport = mlxsw_sp_rif_subport_rif(rif); + if (!refcount_dec_and_test(&rif_subport->ref_count)) + return; + + mlxsw_sp_rif_destroy(rif); +} + static int mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, struct net_device *l3_dev, @@ -6399,22 +6441,18 @@ mlxsw_sp_port_vlan_router_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp; + struct mlxsw_sp_rif_params params = { + .dev = l3_dev, + }; u16 vid = mlxsw_sp_port_vlan->vid; struct mlxsw_sp_rif *rif; struct mlxsw_sp_fid *fid; int err; - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); - if (!rif) { - struct mlxsw_sp_rif_params params = { - .dev = l3_dev, - }; - - mlxsw_sp_rif_subport_params_init(¶ms, mlxsw_sp_port_vlan); - rif = mlxsw_sp_rif_create(mlxsw_sp, ¶ms, extack); - if (IS_ERR(rif)) - return PTR_ERR(rif); - } + mlxsw_sp_rif_subport_params_init(¶ms, mlxsw_sp_port_vlan); + rif = mlxsw_sp_rif_subport_get(mlxsw_sp, ¶ms, extack); + if (IS_ERR(rif)) + return PTR_ERR(rif); /* FID was already created, just take a reference */ fid = rif->ops->fid_get(rif, extack); @@ -6441,6 +6479,7 @@ err_port_vid_learning_set: mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); err_fid_port_vid_map: mlxsw_sp_fid_put(fid); + mlxsw_sp_rif_subport_put(rif); return err; } @@ -6449,6 +6488,7 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid; + struct mlxsw_sp_rif *rif = mlxsw_sp_fid_rif(fid); u16 vid = mlxsw_sp_port_vlan->vid; if (WARN_ON(mlxsw_sp_fid_type(fid) != MLXSW_SP_FID_TYPE_RFID)) @@ -6458,10 +6498,8 @@ mlxsw_sp_port_vlan_router_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) mlxsw_sp_port_vid_stp_set(mlxsw_sp_port, vid, BR_STATE_BLOCKING); mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true); mlxsw_sp_fid_port_vid_unmap(fid, mlxsw_sp_port, vid); - /* If router port holds the last reference on the rFID, then the - * associated Sub-port RIF will be destroyed. 
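mlxsw_sp_rif_subport_get()/put() above turn the subport RIF into a plainly reference-counted object: the creator seeds the refcount_t to one (see mlxsw_sp_rif_subport_setup() further down), later users only bump it, and refcount_dec_and_test() guarantees that exactly one caller performs the destroy. The idiom reduced to a self-contained sketch, with generic names rather than driver code:

#include <linux/refcount.h>
#include <linux/slab.h>

struct shared_obj {
	refcount_t ref_count;
	/* ... payload ... */
};

static struct shared_obj *shared_obj_get(struct shared_obj *existing)
{
	struct shared_obj *obj = existing;

	if (obj) {
		/* Lookup hit: just take another reference. */
		refcount_inc(&obj->ref_count);
		return obj;
	}
	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;
	refcount_set(&obj->ref_count, 1);	/* first user */
	return obj;
}

static void shared_obj_put(struct shared_obj *obj)
{
	/* Only the caller that drops the last reference frees. */
	if (!refcount_dec_and_test(&obj->ref_count))
		return;
	kfree(obj);
}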
- */ mlxsw_sp_fid_put(fid); + mlxsw_sp_rif_subport_put(rif); } static int mlxsw_sp_inetaddr_port_vlan_event(struct net_device *l3_dev, @@ -6497,8 +6535,8 @@ static int mlxsw_sp_inetaddr_port_event(struct net_device *port_dev, netif_is_ovs_port(port_dev)) return 0; - return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, 1, - extack); + return mlxsw_sp_inetaddr_port_vlan_event(port_dev, port_dev, event, + MLXSW_SP_DEFAULT_VID, extack); } static int __mlxsw_sp_inetaddr_lag_event(struct net_device *l3_dev, @@ -6531,15 +6569,15 @@ static int mlxsw_sp_inetaddr_lag_event(struct net_device *lag_dev, if (netif_is_bridge_port(lag_dev)) return 0; - return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, 1, - extack); + return __mlxsw_sp_inetaddr_lag_event(lag_dev, lag_dev, event, + MLXSW_SP_DEFAULT_VID, extack); } -static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, +static int mlxsw_sp_inetaddr_bridge_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *l3_dev, unsigned long event, struct netlink_ext_ack *extack) { - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(l3_dev); struct mlxsw_sp_rif_params params = { .dev = l3_dev, }; @@ -6560,7 +6598,8 @@ static int mlxsw_sp_inetaddr_bridge_event(struct net_device *l3_dev, return 0; } -static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, +static int mlxsw_sp_inetaddr_vlan_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *vlan_dev, unsigned long event, struct netlink_ext_ack *extack) { @@ -6577,7 +6616,8 @@ static int mlxsw_sp_inetaddr_vlan_event(struct net_device *vlan_dev, return __mlxsw_sp_inetaddr_lag_event(vlan_dev, real_dev, event, vid, extack); else if (netif_is_bridge_master(real_dev) && br_vlan_enabled(real_dev)) - return mlxsw_sp_inetaddr_bridge_event(vlan_dev, event, extack); + return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, vlan_dev, event, + extack); return 0; } @@ -6678,16 +6718,11 @@ void mlxsw_sp_rif_macvlan_del(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fid_index(rif->fid), false); } -static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev, +static int mlxsw_sp_inetaddr_macvlan_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *macvlan_dev, unsigned long event, struct netlink_ext_ack *extack) { - struct mlxsw_sp *mlxsw_sp; - - mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev); - if (!mlxsw_sp) - return 0; - switch (event) { case NETDEV_UP: return mlxsw_sp_rif_macvlan_add(mlxsw_sp, macvlan_dev, extack); @@ -6699,7 +6734,35 @@ static int mlxsw_sp_inetaddr_macvlan_event(struct net_device *macvlan_dev, return 0; } -static int __mlxsw_sp_inetaddr_event(struct net_device *dev, +static int mlxsw_sp_router_port_check_rif_addr(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, + const unsigned char *dev_addr, + struct netlink_ext_ack *extack) +{ + struct mlxsw_sp_rif *rif; + int i; + + /* A RIF is not created for macvlan netdevs. 
Their MAC is used to + * populate the FDB + */ + if (netif_is_macvlan(dev)) + return 0; + + for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_RIFS); i++) { + rif = mlxsw_sp->router->rifs[i]; + if (rif && rif->dev != dev && + !ether_addr_equal_masked(rif->dev->dev_addr, dev_addr, + mlxsw_sp->mac_mask)) { + NL_SET_ERR_MSG_MOD(extack, "All router interface MAC addresses must have the same prefix"); + return -EINVAL; + } + } + + return 0; +} + +static int __mlxsw_sp_inetaddr_event(struct mlxsw_sp *mlxsw_sp, + struct net_device *dev, unsigned long event, struct netlink_ext_ack *extack) { @@ -6708,21 +6771,24 @@ static int __mlxsw_sp_inetaddr_event(struct net_device *dev, else if (netif_is_lag_master(dev)) return mlxsw_sp_inetaddr_lag_event(dev, event, extack); else if (netif_is_bridge_master(dev)) - return mlxsw_sp_inetaddr_bridge_event(dev, event, extack); + return mlxsw_sp_inetaddr_bridge_event(mlxsw_sp, dev, event, + extack); else if (is_vlan_dev(dev)) - return mlxsw_sp_inetaddr_vlan_event(dev, event, extack); + return mlxsw_sp_inetaddr_vlan_event(mlxsw_sp, dev, event, + extack); else if (netif_is_macvlan(dev)) - return mlxsw_sp_inetaddr_macvlan_event(dev, event, extack); + return mlxsw_sp_inetaddr_macvlan_event(mlxsw_sp, dev, event, + extack); else return 0; } -int mlxsw_sp_inetaddr_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int mlxsw_sp_inetaddr_event(struct notifier_block *nb, + unsigned long event, void *ptr) { struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; struct net_device *dev = ifa->ifa_dev->dev; - struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_router *router; struct mlxsw_sp_rif *rif; int err = 0; @@ -6730,15 +6796,12 @@ int mlxsw_sp_inetaddr_event(struct notifier_block *unused, if (event == NETDEV_UP) goto out; - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - goto out; - - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + router = container_of(nb, struct mlxsw_sp_router, inetaddr_nb); + rif = mlxsw_sp_rif_find_by_dev(router->mlxsw_sp, dev); if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(dev, event, NULL); + err = __mlxsw_sp_inetaddr_event(router->mlxsw_sp, dev, event, NULL); out: return notifier_from_errno(err); } @@ -6760,13 +6823,19 @@ int mlxsw_sp_inetaddr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(dev, event, ivi->extack); + err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, + ivi->extack); + if (err) + goto out; + + err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, ivi->extack); out: return notifier_from_errno(err); } struct mlxsw_sp_inet6addr_event_work { struct work_struct work; + struct mlxsw_sp *mlxsw_sp; struct net_device *dev; unsigned long event; }; @@ -6775,21 +6844,18 @@ static void mlxsw_sp_inet6addr_event_work(struct work_struct *work) { struct mlxsw_sp_inet6addr_event_work *inet6addr_work = container_of(work, struct mlxsw_sp_inet6addr_event_work, work); + struct mlxsw_sp *mlxsw_sp = inet6addr_work->mlxsw_sp; struct net_device *dev = inet6addr_work->dev; unsigned long event = inet6addr_work->event; - struct mlxsw_sp *mlxsw_sp; struct mlxsw_sp_rif *rif; rtnl_lock(); - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - goto out; rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - __mlxsw_sp_inetaddr_event(dev, event, NULL); + __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, NULL); out: 
rtnl_unlock(); dev_put(dev); @@ -6797,25 +6863,25 @@ out: } /* Called with rcu_read_lock() */ -int mlxsw_sp_inet6addr_event(struct notifier_block *unused, - unsigned long event, void *ptr) +static int mlxsw_sp_inet6addr_event(struct notifier_block *nb, + unsigned long event, void *ptr) { struct inet6_ifaddr *if6 = (struct inet6_ifaddr *) ptr; struct mlxsw_sp_inet6addr_event_work *inet6addr_work; struct net_device *dev = if6->idev->dev; + struct mlxsw_sp_router *router; /* NETDEV_UP event is handled by mlxsw_sp_inet6addr_valid_event */ if (event == NETDEV_UP) return NOTIFY_DONE; - if (!mlxsw_sp_port_dev_lower_find_rcu(dev)) - return NOTIFY_DONE; - inet6addr_work = kzalloc(sizeof(*inet6addr_work), GFP_ATOMIC); if (!inet6addr_work) return NOTIFY_BAD; + router = container_of(nb, struct mlxsw_sp_router, inet6addr_nb); INIT_WORK(&inet6addr_work->work, mlxsw_sp_inet6addr_event_work); + inet6addr_work->mlxsw_sp = router->mlxsw_sp; inet6addr_work->dev = dev; inet6addr_work->event = event; dev_hold(dev); @@ -6841,7 +6907,12 @@ int mlxsw_sp_inet6addr_valid_event(struct notifier_block *unused, if (!mlxsw_sp_rif_should_config(rif, dev, event)) goto out; - err = __mlxsw_sp_inetaddr_event(dev, event, i6vi->extack); + err = mlxsw_sp_router_port_check_rif_addr(mlxsw_sp, dev, dev->dev_addr, + i6vi->extack); + if (err) + goto out; + + err = __mlxsw_sp_inetaddr_event(mlxsw_sp, dev, event, i6vi->extack); out: return notifier_from_errno(err); } @@ -6863,20 +6934,14 @@ static int mlxsw_sp_rif_edit(struct mlxsw_sp *mlxsw_sp, u16 rif_index, return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ritr), ritr_pl); } -int mlxsw_sp_netdevice_router_port_event(struct net_device *dev) +static int +mlxsw_sp_router_port_change_event(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_rif *rif) { - struct mlxsw_sp *mlxsw_sp; - struct mlxsw_sp_rif *rif; + struct net_device *dev = rif->dev; u16 fid_index; int err; - mlxsw_sp = mlxsw_sp_lower_get(dev); - if (!mlxsw_sp) - return 0; - - rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); - if (!rif) - return 0; fid_index = mlxsw_sp_fid_index(rif->fid); err = mlxsw_sp_rif_fdb_op(mlxsw_sp, rif->addr, fid_index, false); @@ -6920,6 +6985,41 @@ err_rif_edit: return err; } +static int mlxsw_sp_router_port_pre_changeaddr_event(struct mlxsw_sp_rif *rif, + struct netdev_notifier_pre_changeaddr_info *info) +{ + struct netlink_ext_ack *extack; + + extack = netdev_notifier_info_to_extack(&info->info); + return mlxsw_sp_router_port_check_rif_addr(rif->mlxsw_sp, rif->dev, + info->dev_addr, extack); +} + +int mlxsw_sp_netdevice_router_port_event(struct net_device *dev, + unsigned long event, void *ptr) +{ + struct mlxsw_sp *mlxsw_sp; + struct mlxsw_sp_rif *rif; + + mlxsw_sp = mlxsw_sp_lower_get(dev); + if (!mlxsw_sp) + return 0; + + rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, dev); + if (!rif) + return 0; + + switch (event) { + case NETDEV_CHANGEMTU: /* fall through */ + case NETDEV_CHANGEADDR: + return mlxsw_sp_router_port_change_event(mlxsw_sp, rif); + case NETDEV_PRE_CHANGEADDR: + return mlxsw_sp_router_port_pre_changeaddr_event(rif, ptr); + } + + return 0; +} + static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, struct net_device *l3_dev, struct netlink_ext_ack *extack) @@ -6931,9 +7031,10 @@ static int mlxsw_sp_port_vrf_join(struct mlxsw_sp *mlxsw_sp, */ rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); if (rif) - __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, extack); + __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, + extack); - return __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_UP, extack); 
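The inetaddr/inet6addr handlers above stop guessing their driver instance from the netdevice via mlxsw_sp_lower_get(); each router now embeds its own notifier_block, and the handler recovers the owning structure with container_of(). The pattern in isolation, with illustrative names:

#include <linux/inetdevice.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

struct example_router {
	struct notifier_block inetaddr_nb;
	/* ... per-instance state ... */
};

static int example_inetaddr_event(struct notifier_block *nb,
				  unsigned long event, void *ptr)
{
	struct example_router *router;

	/* The notifier_block is embedded in its owner, so no global
	 * lookup is needed to find the instance.
	 */
	router = container_of(nb, struct example_router, inetaddr_nb);
	(void)router;
	return NOTIFY_DONE;
}

static int example_router_init(struct example_router *router)
{
	router->inetaddr_nb.notifier_call = example_inetaddr_event;
	return register_inetaddr_notifier(&router->inetaddr_nb);
}

static void example_router_fini(struct example_router *router)
{
	unregister_inetaddr_notifier(&router->inetaddr_nb);
}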
+ return __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_UP, extack); } static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, @@ -6944,7 +7045,7 @@ static void mlxsw_sp_port_vrf_leave(struct mlxsw_sp *mlxsw_sp, rif = mlxsw_sp_rif_find_by_dev(mlxsw_sp, l3_dev); if (!rif) return; - __mlxsw_sp_inetaddr_event(l3_dev, NETDEV_DOWN, NULL); + __mlxsw_sp_inetaddr_event(mlxsw_sp, l3_dev, NETDEV_DOWN, NULL); } int mlxsw_sp_netdevice_vrf_event(struct net_device *l3_dev, unsigned long event, @@ -6998,18 +7099,13 @@ static int mlxsw_sp_rif_macvlan_flush(struct mlxsw_sp_rif *rif) __mlxsw_sp_rif_macvlan_flush, rif); } -static struct mlxsw_sp_rif_subport * -mlxsw_sp_rif_subport_rif(const struct mlxsw_sp_rif *rif) -{ - return container_of(rif, struct mlxsw_sp_rif_subport, common); -} - static void mlxsw_sp_rif_subport_setup(struct mlxsw_sp_rif *rif, const struct mlxsw_sp_rif_params *params) { struct mlxsw_sp_rif_subport *rif_subport; rif_subport = mlxsw_sp_rif_subport_rif(rif); + refcount_set(&rif_subport->ref_count, 1); rif_subport->vid = params->vid; rif_subport->lag = params->lag; if (params->lag) @@ -7164,11 +7260,15 @@ static struct mlxsw_sp_fid * mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, struct netlink_ext_ack *extack) { + struct net_device *br_dev = rif->dev; u16 vid; int err; if (is_vlan_dev(rif->dev)) { vid = vlan_dev_vlan_id(rif->dev); + br_dev = vlan_dev_real_dev(rif->dev); + if (WARN_ON(!netif_is_bridge_master(br_dev))) + return ERR_PTR(-EINVAL); } else { err = br_vlan_get_pvid(rif->dev, &vid); if (err < 0 || !vid) { @@ -7177,7 +7277,7 @@ mlxsw_sp_rif_vlan_fid_get(struct mlxsw_sp_rif *rif, } } - return mlxsw_sp_fid_8021q_get(rif->mlxsw_sp, vid); + return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, br_dev, vid, extack); } static void mlxsw_sp_rif_vlan_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) @@ -7267,7 +7367,7 @@ static struct mlxsw_sp_fid * mlxsw_sp_rif_fid_fid_get(struct mlxsw_sp_rif *rif, struct netlink_ext_ack *extack) { - return mlxsw_sp_fid_8021d_get(rif->mlxsw_sp, rif->dev->ifindex); + return mlxsw_sp_bridge_fid_get(rif->mlxsw_sp, rif->dev, 0, extack); } static void mlxsw_sp_rif_fid_fdb_del(struct mlxsw_sp_rif *rif, const char *mac) @@ -7293,6 +7393,15 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_fid_ops = { .fdb_del = mlxsw_sp_rif_fid_fdb_del, }; +static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_vlan_emu_ops = { + .type = MLXSW_SP_RIF_TYPE_VLAN, + .rif_size = sizeof(struct mlxsw_sp_rif), + .configure = mlxsw_sp_rif_fid_configure, + .deconfigure = mlxsw_sp_rif_fid_deconfigure, + .fid_get = mlxsw_sp_rif_vlan_fid_get, + .fdb_del = mlxsw_sp_rif_vlan_fdb_del, +}; + static struct mlxsw_sp_rif_ipip_lb * mlxsw_sp_rif_ipip_lb_rif(struct mlxsw_sp_rif *rif) { @@ -7361,7 +7470,7 @@ static const struct mlxsw_sp_rif_ops mlxsw_sp_rif_ipip_lb_ops = { static const struct mlxsw_sp_rif_ops *mlxsw_sp_rif_ops_arr[] = { [MLXSW_SP_RIF_TYPE_SUBPORT] = &mlxsw_sp_rif_subport_ops, - [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_ops, + [MLXSW_SP_RIF_TYPE_VLAN] = &mlxsw_sp_rif_vlan_emu_ops, [MLXSW_SP_RIF_TYPE_FID] = &mlxsw_sp_rif_fid_ops, [MLXSW_SP_RIF_TYPE_IPIP_LB] = &mlxsw_sp_rif_ipip_lb_ops, }; @@ -7552,6 +7661,16 @@ int mlxsw_sp_router_init(struct mlxsw_sp *mlxsw_sp) mlxsw_sp->router = router; router->mlxsw_sp = mlxsw_sp; + router->inetaddr_nb.notifier_call = mlxsw_sp_inetaddr_event; + err = register_inetaddr_notifier(&router->inetaddr_nb); + if (err) + goto err_register_inetaddr_notifier; + + router->inet6addr_nb.notifier_call = mlxsw_sp_inet6addr_event; + err = 
register_inet6addr_notifier(&router->inet6addr_nb); + if (err) + goto err_register_inet6addr_notifier; + INIT_LIST_HEAD(&mlxsw_sp->router->nexthop_neighs_list); err = __mlxsw_sp_router_init(mlxsw_sp); if (err) @@ -7637,6 +7756,10 @@ err_ipips_init: err_rifs_init: __mlxsw_sp_router_fini(mlxsw_sp); err_router_init: + unregister_inet6addr_notifier(&router->inet6addr_nb); +err_register_inet6addr_notifier: + unregister_inetaddr_notifier(&router->inetaddr_nb); +err_register_inetaddr_notifier: kfree(mlxsw_sp->router); return err; } @@ -7654,5 +7777,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) mlxsw_sp_ipips_fini(mlxsw_sp); mlxsw_sp_rifs_fini(mlxsw_sp); __mlxsw_sp_router_fini(mlxsw_sp); + unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); + unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); kfree(mlxsw_sp->router); } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c index d965fd275c90..ad5a9b9e1466 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c @@ -383,7 +383,7 @@ mlxsw_sp_span_entry_gretap4_deconfigure(struct mlxsw_sp_span_entry *span_entry) } static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap4 = { - .can_handle = is_gretap_dev, + .can_handle = netif_is_gretap, .parms = mlxsw_sp_span_entry_gretap4_parms, .configure = mlxsw_sp_span_entry_gretap4_configure, .deconfigure = mlxsw_sp_span_entry_gretap4_deconfigure, @@ -484,7 +484,7 @@ mlxsw_sp_span_entry_gretap6_deconfigure(struct mlxsw_sp_span_entry *span_entry) static const struct mlxsw_sp_span_entry_ops mlxsw_sp_span_entry_ops_gretap6 = { - .can_handle = is_ip6gretap_dev, + .can_handle = netif_is_ip6gretap, .parms = mlxsw_sp_span_entry_gretap6_parms, .configure = mlxsw_sp_span_entry_gretap6_configure, .deconfigure = mlxsw_sp_span_entry_gretap6_deconfigure, diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 50080c60a279..1bd2c6e15f8d 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -85,13 +85,11 @@ struct mlxsw_sp_bridge_ops { struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port); int (*vxlan_join)(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev, + const struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack); - void (*vxlan_leave)(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev); struct mlxsw_sp_fid * (*fid_get)(struct mlxsw_sp_bridge_device *bridge_device, - u16 vid); + u16 vid, struct netlink_ext_ack *extack); struct mlxsw_sp_fid * (*fid_lookup)(struct mlxsw_sp_bridge_device *bridge_device, u16 vid); @@ -292,30 +290,6 @@ mlxsw_sp_bridge_port_destroy(struct mlxsw_sp_bridge_port *bridge_port) kfree(bridge_port); } -static bool -mlxsw_sp_bridge_port_should_destroy(const struct mlxsw_sp_bridge_port * - bridge_port) -{ - struct net_device *dev = bridge_port->dev; - struct mlxsw_sp *mlxsw_sp; - - if (is_vlan_dev(dev)) - mlxsw_sp = mlxsw_sp_lower_get(vlan_dev_real_dev(dev)); - else - mlxsw_sp = mlxsw_sp_lower_get(dev); - - /* In case ports were pulled from out of a bridged LAG, then - * it's possible the reference count isn't zero, yet the bridge - * port should be destroyed, as it's no longer an upper of ours. 
- */ - if (!mlxsw_sp && list_empty(&bridge_port->vlans_list)) - return true; - else if (bridge_port->ref_count == 0) - return true; - else - return false; -} - static struct mlxsw_sp_bridge_port * mlxsw_sp_bridge_port_get(struct mlxsw_sp_bridge *bridge, struct net_device *brport_dev) @@ -353,8 +327,7 @@ static void mlxsw_sp_bridge_port_put(struct mlxsw_sp_bridge *bridge, { struct mlxsw_sp_bridge_device *bridge_device; - bridge_port->ref_count--; - if (!mlxsw_sp_bridge_port_should_destroy(bridge_port)) + if (--bridge_port->ref_count != 0) return; bridge_device = bridge_port->bridge_device; mlxsw_sp_bridge_port_destroy(bridge_port); @@ -935,7 +908,8 @@ static int mlxsw_sp_port_attr_set(struct net_device *dev, static int mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, - struct mlxsw_sp_bridge_port *bridge_port) + struct mlxsw_sp_bridge_port *bridge_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_bridge_device *bridge_device; @@ -945,7 +919,7 @@ mlxsw_sp_port_vlan_fid_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, int err; bridge_device = bridge_port->bridge_device; - fid = bridge_device->ops->fid_get(bridge_device, vid); + fid = bridge_device->ops->fid_get(bridge_device, vid, extack); if (IS_ERR(fid)) return PTR_ERR(fid); @@ -1013,7 +987,8 @@ mlxsw_sp_port_pvid_determine(const struct mlxsw_sp_port *mlxsw_sp_port, static int mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, - struct mlxsw_sp_bridge_port *bridge_port) + struct mlxsw_sp_bridge_port *bridge_port, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port; struct mlxsw_sp_bridge_vlan *bridge_vlan; @@ -1021,12 +996,11 @@ mlxsw_sp_port_vlan_bridge_join(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, int err; /* No need to continue if only VLAN flags were changed */ - if (mlxsw_sp_port_vlan->bridge_port) { - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + if (mlxsw_sp_port_vlan->bridge_port) return 0; - } - err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port); + err = mlxsw_sp_port_vlan_fid_join(mlxsw_sp_port_vlan, bridge_port, + extack); if (err) return err; @@ -1103,16 +1077,33 @@ mlxsw_sp_port_vlan_bridge_leave(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan) static int mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, struct mlxsw_sp_bridge_port *bridge_port, - u16 vid, bool is_untagged, bool is_pvid) + u16 vid, bool is_untagged, bool is_pvid, + struct netlink_ext_ack *extack, + struct switchdev_trans *trans) { u16 pvid = mlxsw_sp_port_pvid_determine(mlxsw_sp_port, vid, is_pvid); struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; u16 old_pvid = mlxsw_sp_port->pvid; int err; - mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid); - if (IS_ERR(mlxsw_sp_port_vlan)) - return PTR_ERR(mlxsw_sp_port_vlan); + /* The only valid scenario in which a port-vlan already exists, is if + * the VLAN flags were changed and the port-vlan is associated with the + * correct bridge port + */ + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); + if (mlxsw_sp_port_vlan && + mlxsw_sp_port_vlan->bridge_port != bridge_port) + return -EEXIST; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (!mlxsw_sp_port_vlan) { + mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port, + vid); + if (IS_ERR(mlxsw_sp_port_vlan)) + return PTR_ERR(mlxsw_sp_port_vlan); + } err = 
mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, is_untagged); @@ -1123,7 +1114,8 @@ mlxsw_sp_bridge_port_vlan_add(struct mlxsw_sp_port *mlxsw_sp_port, if (err) goto err_port_pvid_set; - err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port); + err = mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, + extack); if (err) goto err_port_vlan_bridge_join; @@ -1134,7 +1126,7 @@ err_port_vlan_bridge_join: err_port_pvid_set: mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); err_port_vlan_set: - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); return err; } @@ -1173,7 +1165,8 @@ mlxsw_sp_br_ban_rif_pvid_change(struct mlxsw_sp *mlxsw_sp, static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, const struct switchdev_obj_port_vlan *vlan, - struct switchdev_trans *trans) + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) { bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; @@ -1195,9 +1188,6 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, return err; } - if (switchdev_trans_ph_prepare(trans)) - return 0; - bridge_port = mlxsw_sp_bridge_port_find(mlxsw_sp->bridge, orig_dev); if (WARN_ON(!bridge_port)) return -EINVAL; @@ -1210,7 +1200,7 @@ static int mlxsw_sp_port_vlans_add(struct mlxsw_sp_port *mlxsw_sp_port, err = mlxsw_sp_bridge_port_vlan_add(mlxsw_sp_port, bridge_port, vid, flag_untagged, - flag_pvid); + flag_pvid, extack, trans); if (err) return err; } @@ -1779,7 +1769,8 @@ static void mlxsw_sp_span_respin_schedule(struct mlxsw_sp *mlxsw_sp) static int mlxsw_sp_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, - struct switchdev_trans *trans) + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) { struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev); const struct switchdev_obj_port_vlan *vlan; @@ -1788,7 +1779,8 @@ static int mlxsw_sp_port_obj_add(struct net_device *dev, switch (obj->id) { case SWITCHDEV_OBJ_ID_PORT_VLAN: vlan = SWITCHDEV_OBJ_PORT_VLAN(obj); - err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans); + err = mlxsw_sp_port_vlans_add(mlxsw_sp_port, vlan, trans, + extack); if (switchdev_trans_ph_prepare(trans)) { /* The event is emitted before the changes are actually @@ -1826,7 +1818,7 @@ mlxsw_sp_bridge_port_vlan_del(struct mlxsw_sp_port *mlxsw_sp_port, mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); mlxsw_sp_port_pvid_set(mlxsw_sp_port, pvid); mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false); - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan); } static int mlxsw_sp_port_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port, @@ -1974,8 +1966,6 @@ static struct mlxsw_sp_port *mlxsw_sp_lag_rep_port(struct mlxsw_sp *mlxsw_sp, static const struct switchdev_ops mlxsw_sp_port_switchdev_ops = { .switchdev_port_attr_get = mlxsw_sp_port_attr_get, .switchdev_port_attr_set = mlxsw_sp_port_attr_set, - .switchdev_port_obj_add = mlxsw_sp_port_obj_add, - .switchdev_port_obj_del = mlxsw_sp_port_obj_del, }; static int @@ -1984,19 +1974,14 @@ mlxsw_sp_bridge_8021q_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_port *mlxsw_sp_port, struct netlink_ext_ack *extack) { - struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan; - if (is_vlan_dev(bridge_port->dev)) { NL_SET_ERR_MSG_MOD(extack, "Can not enslave a VLAN device to a VLAN-aware bridge"); return -EINVAL; } - mlxsw_sp_port_vlan = 
mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1); - if (WARN_ON(!mlxsw_sp_port_vlan)) - return -EINVAL; - - /* Let VLAN-aware bridge take care of its own VLANs */ - mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan); + /* Port is no longer usable as a router interface */ + if (mlxsw_sp_port->default_vlan->fid) + mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan); return 0; } @@ -2006,41 +1991,133 @@ mlxsw_sp_bridge_8021q_port_leave(struct mlxsw_sp_bridge_device *bridge_device, struct mlxsw_sp_bridge_port *bridge_port, struct mlxsw_sp_port *mlxsw_sp_port) { - mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1); /* Make sure untagged frames are allowed to ingress */ - mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1); + mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID); } static int mlxsw_sp_bridge_8021q_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev, + const struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack) { - WARN_ON(1); - return -EINVAL; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); + struct mlxsw_sp_nve_params params = { + .type = MLXSW_SP_NVE_TYPE_VXLAN, + .vni = vxlan->cfg.vni, + .dev = vxlan_dev, + }; + struct mlxsw_sp_fid *fid; + int err; + + /* If the VLAN is 0, we need to find the VLAN that is configured as + * PVID and egress untagged on the bridge port of the VxLAN device. + * It is possible no such VLAN exists + */ + if (!vid) { + err = mlxsw_sp_vxlan_mapped_vid(vxlan_dev, &vid); + if (err || !vid) + return err; + } + + /* If no other port is a member of the VLAN, then the FID does not + * exist. NVE will be enabled on the FID once a port joins the VLAN + */ + fid = mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid); + if (!fid) + return 0; + + if (mlxsw_sp_fid_vni_is_set(fid)) { + err = -EINVAL; + goto err_vni_exists; + } + + err = mlxsw_sp_nve_fid_enable(mlxsw_sp, fid, &params, extack); + if (err) + goto err_nve_fid_enable; + + /* The tunnel port does not hold a reference on the FID. Only + * local ports and the router port do + */ + mlxsw_sp_fid_put(fid); + + return 0; + +err_nve_fid_enable: +err_vni_exists: + mlxsw_sp_fid_put(fid); + return err; } -static void -mlxsw_sp_bridge_8021q_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev) +static struct net_device * +mlxsw_sp_bridge_8021q_vxlan_dev_find(struct net_device *br_dev, u16 vid) { + struct net_device *dev; + struct list_head *iter; + + netdev_for_each_lower_dev(br_dev, dev, iter) { + u16 pvid; + int err; + + if (!netif_is_vxlan(dev)) + continue; + + err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid); + if (err || pvid != vid) + continue; + + return dev; + } + + return NULL; } static struct mlxsw_sp_fid * mlxsw_sp_bridge_8021q_fid_get(struct mlxsw_sp_bridge_device *bridge_device, - u16 vid) + u16 vid, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + struct net_device *vxlan_dev; + struct mlxsw_sp_fid *fid; + int err; + + fid = mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); + if (IS_ERR(fid)) + return fid; + + if (mlxsw_sp_fid_vni_is_set(fid)) + return fid; + + /* Find the VxLAN device that has the specified VLAN configured as + * PVID and egress untagged.
There can be at most one such device + */ + vxlan_dev = mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, + vid); + if (!vxlan_dev) + return fid; + + if (!netif_running(vxlan_dev)) + return fid; + + err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, + extack); + if (err) + goto err_vxlan_join; + + return fid; - return mlxsw_sp_fid_8021q_get(mlxsw_sp, vid); +err_vxlan_join: + mlxsw_sp_fid_put(fid); + return ERR_PTR(err); } static struct mlxsw_sp_fid * mlxsw_sp_bridge_8021q_fid_lookup(struct mlxsw_sp_bridge_device *bridge_device, u16 vid) { - WARN_ON(1); - return NULL; + struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); + + return mlxsw_sp_fid_8021q_lookup(mlxsw_sp, vid); } static u16 @@ -2054,7 +2131,6 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021q_ops = { .port_join = mlxsw_sp_bridge_8021q_port_join, .port_leave = mlxsw_sp_bridge_8021q_port_leave, .vxlan_join = mlxsw_sp_bridge_8021q_vxlan_join, - .vxlan_leave = mlxsw_sp_bridge_8021q_vxlan_leave, .fid_get = mlxsw_sp_bridge_8021q_fid_get, .fid_lookup = mlxsw_sp_bridge_8021q_fid_lookup, .fid_vid = mlxsw_sp_bridge_8021q_fid_vid, @@ -2087,7 +2163,7 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, struct net_device *dev = bridge_port->dev; u16 vid; - vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; + vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); if (WARN_ON(!mlxsw_sp_port_vlan)) return -EINVAL; @@ -2101,7 +2177,8 @@ mlxsw_sp_bridge_8021d_port_join(struct mlxsw_sp_bridge_device *bridge_device, if (mlxsw_sp_port_vlan->fid) mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan); - return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port); + return mlxsw_sp_port_vlan_bridge_join(mlxsw_sp_port_vlan, bridge_port, + extack); } static void @@ -2113,9 +2190,9 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, struct net_device *dev = bridge_port->dev; u16 vid; - vid = is_vlan_dev(dev) ? vlan_dev_vlan_id(dev) : 1; + vid = is_vlan_dev(dev) ? 
vlan_dev_vlan_id(dev) : MLXSW_SP_DEFAULT_VID; mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid); - if (!mlxsw_sp_port_vlan) + if (!mlxsw_sp_port_vlan || !mlxsw_sp_port_vlan->bridge_port) return; mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan); @@ -2123,7 +2200,7 @@ mlxsw_sp_bridge_8021d_port_leave(struct mlxsw_sp_bridge_device *bridge_device, static int mlxsw_sp_bridge_8021d_vxlan_join(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev, + const struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); @@ -2162,29 +2239,9 @@ err_vni_exists: return err; } -static void -mlxsw_sp_bridge_8021d_vxlan_leave(struct mlxsw_sp_bridge_device *bridge_device, - const struct net_device *vxlan_dev) -{ - struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); - struct mlxsw_sp_fid *fid; - - fid = mlxsw_sp_fid_8021d_lookup(mlxsw_sp, bridge_device->dev->ifindex); - if (WARN_ON(!fid)) - return; - - /* If the VxLAN device is down, then the FID does not have a VNI */ - if (!mlxsw_sp_fid_vni_is_set(fid)) - goto out; - - mlxsw_sp_nve_fid_disable(mlxsw_sp, fid); -out: - mlxsw_sp_fid_put(fid); -} - static struct mlxsw_sp_fid * mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device, - u16 vid) + u16 vid, struct netlink_ext_ack *extack) { struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(bridge_device->dev); struct net_device *vxlan_dev; @@ -2205,7 +2262,8 @@ mlxsw_sp_bridge_8021d_fid_get(struct mlxsw_sp_bridge_device *bridge_device, if (!netif_running(vxlan_dev)) return fid; - err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, NULL); + err = mlxsw_sp_bridge_8021d_vxlan_join(bridge_device, vxlan_dev, 0, + extack); if (err) goto err_vxlan_join; @@ -2240,7 +2298,6 @@ static const struct mlxsw_sp_bridge_ops mlxsw_sp_bridge_8021d_ops = { .port_join = mlxsw_sp_bridge_8021d_port_join, .port_leave = mlxsw_sp_bridge_8021d_port_leave, .vxlan_join = mlxsw_sp_bridge_8021d_vxlan_join, - .vxlan_leave = mlxsw_sp_bridge_8021d_vxlan_leave, .fid_get = mlxsw_sp_bridge_8021d_fid_get, .fid_lookup = mlxsw_sp_bridge_8021d_fid_lookup, .fid_vid = mlxsw_sp_bridge_8021d_fid_vid, @@ -2295,7 +2352,7 @@ void mlxsw_sp_port_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port, int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, const struct net_device *br_dev, - const struct net_device *vxlan_dev, + const struct net_device *vxlan_dev, u16 vid, struct netlink_ext_ack *extack) { struct mlxsw_sp_bridge_device *bridge_device; @@ -2304,20 +2361,102 @@ int mlxsw_sp_bridge_vxlan_join(struct mlxsw_sp *mlxsw_sp, if (WARN_ON(!bridge_device)) return -EINVAL; - return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, extack); + return bridge_device->ops->vxlan_join(bridge_device, vxlan_dev, vid, + extack); } void mlxsw_sp_bridge_vxlan_leave(struct mlxsw_sp *mlxsw_sp, - const struct net_device *br_dev, const struct net_device *vxlan_dev) { + struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); + struct mlxsw_sp_fid *fid; + + /* If the VxLAN device is down, then the FID does not have a VNI */ + fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vxlan->cfg.vni); + if (!fid) + return; + + mlxsw_sp_nve_fid_disable(mlxsw_sp, fid); + mlxsw_sp_fid_put(fid); +} + +struct mlxsw_sp_fid *mlxsw_sp_bridge_fid_get(struct mlxsw_sp *mlxsw_sp, + const struct net_device *br_dev, + u16 vid, + struct netlink_ext_ack *extack) +{ struct mlxsw_sp_bridge_device *bridge_device; bridge_device = 
mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); if (WARN_ON(!bridge_device)) - return; + return ERR_PTR(-EINVAL); + + return bridge_device->ops->fid_get(bridge_device, vid, extack); +} + +static void +mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr, + enum mlxsw_sp_l3proto *proto, + union mlxsw_sp_l3addr *addr) +{ + if (vxlan_addr->sa.sa_family == AF_INET) { + addr->addr4 = vxlan_addr->sin.sin_addr.s_addr; + *proto = MLXSW_SP_L3_PROTO_IPV4; + } else { + addr->addr6 = vxlan_addr->sin6.sin6_addr; + *proto = MLXSW_SP_L3_PROTO_IPV6; + } +} - bridge_device->ops->vxlan_leave(bridge_device, vxlan_dev); +static void +mlxsw_sp_switchdev_addr_vxlan_convert(enum mlxsw_sp_l3proto proto, + const union mlxsw_sp_l3addr *addr, + union vxlan_addr *vxlan_addr) +{ + switch (proto) { + case MLXSW_SP_L3_PROTO_IPV4: + vxlan_addr->sa.sa_family = AF_INET; + vxlan_addr->sin.sin_addr.s_addr = addr->addr4; + break; + case MLXSW_SP_L3_PROTO_IPV6: + vxlan_addr->sa.sa_family = AF_INET6; + vxlan_addr->sin6.sin6_addr = addr->addr6; + break; + } +} + +static void mlxsw_sp_fdb_vxlan_call_notifiers(struct net_device *dev, + const char *mac, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr, + __be32 vni, bool adding) +{ + struct switchdev_notifier_vxlan_fdb_info info; + struct vxlan_dev *vxlan = netdev_priv(dev); + enum switchdev_notifier_type type; + + type = adding ? SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE : + SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE; + mlxsw_sp_switchdev_addr_vxlan_convert(proto, addr, &info.remote_ip); + info.remote_port = vxlan->cfg.dst_port; + info.remote_vni = vni; + info.remote_ifindex = 0; + ether_addr_copy(info.eth_addr, mac); + info.vni = vni; + info.offloaded = adding; + call_switchdev_notifiers(type, dev, &info.info); +} + +static void mlxsw_sp_fdb_nve_call_notifiers(struct net_device *dev, + const char *mac, + enum mlxsw_sp_l3proto proto, + union mlxsw_sp_l3addr *addr, + __be32 vni, + bool adding) +{ + if (netif_is_vxlan(dev)) + mlxsw_sp_fdb_vxlan_call_notifiers(dev, mac, proto, addr, vni, + adding); } static void @@ -2428,7 +2567,8 @@ static void mlxsw_sp_fdb_notify_mac_lag_process(struct mlxsw_sp *mlxsw_sp, bridge_device = bridge_port->bridge_device; vid = bridge_device->vlan_enabled ? mlxsw_sp_port_vlan->vid : 0; - lag_vid = mlxsw_sp_port_vlan->vid; + lag_vid = mlxsw_sp_fid_lag_vid_valid(mlxsw_sp_port_vlan->fid) ? 
+ mlxsw_sp_port_vlan->vid : 0; do_fdb_op: err = mlxsw_sp_port_fdb_uc_lag_op(mlxsw_sp, lag_id, mac, fid, lag_vid, @@ -2451,6 +2591,122 @@ just_remove: goto do_fdb_op; } +static int +__mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp, + const struct mlxsw_sp_fid *fid, + bool adding, + struct net_device **nve_dev, + u16 *p_vid, __be32 *p_vni) +{ + struct mlxsw_sp_bridge_device *bridge_device; + struct net_device *br_dev, *dev; + int nve_ifindex; + int err; + + err = mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex); + if (err) + return err; + + err = mlxsw_sp_fid_vni(fid, p_vni); + if (err) + return err; + + dev = __dev_get_by_index(&init_net, nve_ifindex); + if (!dev) + return -EINVAL; + *nve_dev = dev; + + if (!netif_running(dev)) + return -EINVAL; + + if (adding && !br_port_flag_is_set(dev, BR_LEARNING)) + return -EINVAL; + + if (adding && netif_is_vxlan(dev)) { + struct vxlan_dev *vxlan = netdev_priv(dev); + + if (!(vxlan->cfg.flags & VXLAN_F_LEARN)) + return -EINVAL; + } + + br_dev = netdev_master_upper_dev_get(dev); + if (!br_dev) + return -EINVAL; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return -EINVAL; + + *p_vid = bridge_device->ops->fid_vid(bridge_device, fid); + + return 0; +} + +static void mlxsw_sp_fdb_notify_mac_uc_tunnel_process(struct mlxsw_sp *mlxsw_sp, + char *sfn_pl, + int rec_index, + bool adding) +{ + enum mlxsw_reg_sfn_uc_tunnel_protocol sfn_proto; + enum switchdev_notifier_type type; + struct net_device *nve_dev; + union mlxsw_sp_l3addr addr; + struct mlxsw_sp_fid *fid; + char mac[ETH_ALEN]; + u16 fid_index, vid; + __be32 vni; + u32 uip; + int err; + + mlxsw_reg_sfn_uc_tunnel_unpack(sfn_pl, rec_index, mac, &fid_index, + &uip, &sfn_proto); + + fid = mlxsw_sp_fid_lookup_by_index(mlxsw_sp, fid_index); + if (!fid) + goto err_fid_lookup; + + err = mlxsw_sp_nve_learned_ip_resolve(mlxsw_sp, uip, + (enum mlxsw_sp_l3proto) sfn_proto, + &addr); + if (err) + goto err_ip_resolve; + + err = __mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, fid, adding, + &nve_dev, &vid, &vni); + if (err) + goto err_fdb_process; + + err = mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index, + (enum mlxsw_sp_l3proto) sfn_proto, + &addr, adding, true); + if (err) + goto err_fdb_op; + + mlxsw_sp_fdb_nve_call_notifiers(nve_dev, mac, + (enum mlxsw_sp_l3proto) sfn_proto, + &addr, vni, adding); + + type = adding ? SWITCHDEV_FDB_ADD_TO_BRIDGE : + SWITCHDEV_FDB_DEL_TO_BRIDGE; + mlxsw_sp_fdb_call_notifiers(type, mac, vid, nve_dev, adding); + + mlxsw_sp_fid_put(fid); + + return; + +err_fdb_op: +err_fdb_process: +err_ip_resolve: + mlxsw_sp_fid_put(fid); +err_fid_lookup: + /* Remove an FDB entry in case we cannot process it. Otherwise the + * device will keep sending the same notification over and over again. 
+ */ + mlxsw_sp_port_fdb_tunnel_uc_op(mlxsw_sp, mac, fid_index, + (enum mlxsw_sp_l3proto) sfn_proto, &addr, + false, true); +} + static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, char *sfn_pl, int rec_index) { @@ -2471,6 +2727,14 @@ static void mlxsw_sp_fdb_notify_rec_process(struct mlxsw_sp *mlxsw_sp, mlxsw_sp_fdb_notify_mac_lag_process(mlxsw_sp, sfn_pl, rec_index, false); break; + case MLXSW_REG_SFN_REC_TYPE_LEARNED_UNICAST_TUNNEL: + mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl, + rec_index, true); + break; + case MLXSW_REG_SFN_REC_TYPE_AGED_OUT_UNICAST_TUNNEL: + mlxsw_sp_fdb_notify_mac_uc_tunnel_process(mlxsw_sp, sfn_pl, + rec_index, false); + break; } } @@ -2526,20 +2790,6 @@ struct mlxsw_sp_switchdev_event_work { }; static void -mlxsw_sp_switchdev_vxlan_addr_convert(const union vxlan_addr *vxlan_addr, - enum mlxsw_sp_l3proto *proto, - union mlxsw_sp_l3addr *addr) -{ - if (vxlan_addr->sa.sa_family == AF_INET) { - addr->addr4 = vxlan_addr->sin.sin_addr.s_addr; - *proto = MLXSW_SP_L3_PROTO_IPV4; - } else { - addr->addr6 = vxlan_addr->sin6.sin6_addr; - *proto = MLXSW_SP_L3_PROTO_IPV6; - } -} - -static void mlxsw_sp_switchdev_bridge_vxlan_fdb_event(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_switchdev_event_work * switchdev_work, @@ -2604,7 +2854,8 @@ mlxsw_sp_switchdev_bridge_nve_fdb_event(struct mlxsw_sp_switchdev_event_work * switchdev_work->event != SWITCHDEV_FDB_DEL_TO_DEVICE) return; - if (!switchdev_work->fdb_info.added_by_user) + if (switchdev_work->event == SWITCHDEV_FDB_ADD_TO_DEVICE && + !switchdev_work->fdb_info.added_by_user) return; if (!netif_running(dev)) @@ -2947,10 +3198,274 @@ err_addr_alloc: return NOTIFY_BAD; } -static struct notifier_block mlxsw_sp_switchdev_notifier = { +struct notifier_block mlxsw_sp_switchdev_notifier = { .notifier_call = mlxsw_sp_switchdev_event, }; +static int +mlxsw_sp_switchdev_vxlan_vlan_add(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid, + bool flag_untagged, bool flag_pvid, + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); + __be32 vni = vxlan->cfg.vni; + struct mlxsw_sp_fid *fid; + u16 old_vid; + int err; + + /* We cannot have the same VLAN as PVID and egress untagged on multiple + * VxLAN devices. Note that we get this notification before the VLAN is + * actually added to the bridge's database, so it is not possible for + * the lookup function to return 'vxlan_dev' + */ + if (flag_untagged && flag_pvid && + mlxsw_sp_bridge_8021q_vxlan_dev_find(bridge_device->dev, vid)) + return -EINVAL; + + if (switchdev_trans_ph_prepare(trans)) + return 0; + + if (!netif_running(vxlan_dev)) + return 0; + + /* First case: FID is not associated with this VNI, but the new VLAN + * is both PVID and egress untagged. Need to enable NVE on the FID, if + * it exists + */ + fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni); + if (!fid) { + if (!flag_untagged || !flag_pvid) + return 0; + return mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, + vxlan_dev, vid, extack); + } + + /* Second case: FID is associated with the VNI and the VLAN associated + * with the FID is the same as the notified VLAN. 
This means the flags + * (PVID / egress untagged) were toggled and that NVE should be + * disabled on the FID + */ + old_vid = mlxsw_sp_fid_8021q_vid(fid); + if (vid == old_vid) { + if (WARN_ON(flag_untagged && flag_pvid)) { + mlxsw_sp_fid_put(fid); + return -EINVAL; + } + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + mlxsw_sp_fid_put(fid); + return 0; + } + + /* Third case: A new VLAN was configured on the VxLAN device, but this + * VLAN is not PVID, so there is nothing to do. + */ + if (!flag_pvid) { + mlxsw_sp_fid_put(fid); + return 0; + } + + /* Fourth case: The new VLAN is PVID, which means the VLAN currently + * mapped to the VNI should be unmapped + */ + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + mlxsw_sp_fid_put(fid); + + /* Fifth case: The new VLAN is also egress untagged, which means the + * VLAN needs to be mapped to the VNI + */ + if (!flag_untagged) + return 0; + + err = mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, vid, + extack); + if (err) + goto err_vxlan_join; + + return 0; + +err_vxlan_join: + mlxsw_sp_bridge_8021q_vxlan_join(bridge_device, vxlan_dev, old_vid, + NULL); + return err; +} + +static void +mlxsw_sp_switchdev_vxlan_vlan_del(struct mlxsw_sp *mlxsw_sp, + struct mlxsw_sp_bridge_device *bridge_device, + const struct net_device *vxlan_dev, u16 vid) +{ + struct vxlan_dev *vxlan = netdev_priv(vxlan_dev); + __be32 vni = vxlan->cfg.vni; + struct mlxsw_sp_fid *fid; + + if (!netif_running(vxlan_dev)) + return; + + fid = mlxsw_sp_fid_lookup_by_vni(mlxsw_sp, vni); + if (!fid) + return; + + /* A different VLAN than the one mapped to the VNI is deleted */ + if (mlxsw_sp_fid_8021q_vid(fid) != vid) + goto out; + + mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, vxlan_dev); + +out: + mlxsw_sp_fid_put(fid); +} + +static int +mlxsw_sp_switchdev_vxlan_vlans_add(struct net_device *vxlan_dev, + struct switchdev_notifier_port_obj_info * + port_obj_info) +{ + struct switchdev_obj_port_vlan *vlan = + SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj); + bool flag_untagged = vlan->flags & BRIDGE_VLAN_INFO_UNTAGGED; + bool flag_pvid = vlan->flags & BRIDGE_VLAN_INFO_PVID; + struct switchdev_trans *trans = port_obj_info->trans; + struct mlxsw_sp_bridge_device *bridge_device; + struct netlink_ext_ack *extack; + struct mlxsw_sp *mlxsw_sp; + struct net_device *br_dev; + u16 vid; + + extack = switchdev_notifier_info_to_extack(&port_obj_info->info); + br_dev = netdev_master_upper_dev_get(vxlan_dev); + if (!br_dev) + return 0; + + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + return 0; + + port_obj_info->handled = true; + + bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return -EINVAL; + + if (!bridge_device->vlan_enabled) + return 0; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) { + int err; + + err = mlxsw_sp_switchdev_vxlan_vlan_add(mlxsw_sp, bridge_device, + vxlan_dev, vid, + flag_untagged, + flag_pvid, trans, + extack); + if (err) + return err; + } + + return 0; +} + +static void +mlxsw_sp_switchdev_vxlan_vlans_del(struct net_device *vxlan_dev, + struct switchdev_notifier_port_obj_info * + port_obj_info) +{ + struct switchdev_obj_port_vlan *vlan = + SWITCHDEV_OBJ_PORT_VLAN(port_obj_info->obj); + struct mlxsw_sp_bridge_device *bridge_device; + struct mlxsw_sp *mlxsw_sp; + struct net_device *br_dev; + u16 vid; + + br_dev = netdev_master_upper_dev_get(vxlan_dev); + if (!br_dev) + return; + + mlxsw_sp = mlxsw_sp_lower_get(br_dev); + if (!mlxsw_sp) + return; + + port_obj_info->handled = true;
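For quick reference, the five cases handled by mlxsw_sp_switchdev_vxlan_vlan_add() above can be tabulated as follows (an informal summary derived from the code, not part of the patch):

	/* FID for VNI?  vid == mapped VID?  PVID  untagged  action
	 * no            -                   yes   yes       map new VLAN to VNI (1)
	 * no            -                   otherwise       nothing to do (1)
	 * yes           yes                 -     -         flags toggled: unmap (2)
	 * yes           no                  no    -         nothing to do (3)
	 * yes           no                  yes   no        unmap old VLAN (4)
	 * yes           no                  yes   yes       unmap old, map new (4+5)
	 */

+ + bridge_device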
= mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); + if (!bridge_device) + return; + + if (!bridge_device->vlan_enabled) + return; + + for (vid = vlan->vid_begin; vid <= vlan->vid_end; vid++) + mlxsw_sp_switchdev_vxlan_vlan_del(mlxsw_sp, bridge_device, + vxlan_dev, vid); +} + +static int +mlxsw_sp_switchdev_handle_vxlan_obj_add(struct net_device *vxlan_dev, + struct switchdev_notifier_port_obj_info * + port_obj_info) +{ + int err = 0; + + switch (port_obj_info->obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + err = mlxsw_sp_switchdev_vxlan_vlans_add(vxlan_dev, + port_obj_info); + break; + default: + break; + } + + return err; +} + +static void +mlxsw_sp_switchdev_handle_vxlan_obj_del(struct net_device *vxlan_dev, + struct switchdev_notifier_port_obj_info * + port_obj_info) +{ + switch (port_obj_info->obj->id) { + case SWITCHDEV_OBJ_ID_PORT_VLAN: + mlxsw_sp_switchdev_vxlan_vlans_del(vxlan_dev, port_obj_info); + break; + default: + break; + } +} + +static int mlxsw_sp_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err = 0; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + if (netif_is_vxlan(dev)) + err = mlxsw_sp_switchdev_handle_vxlan_obj_add(dev, ptr); + else + err = switchdev_handle_port_obj_add(dev, ptr, + mlxsw_sp_port_dev_check, + mlxsw_sp_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + if (netif_is_vxlan(dev)) + mlxsw_sp_switchdev_handle_vxlan_obj_del(dev, ptr); + else + err = switchdev_handle_port_obj_del(dev, ptr, + mlxsw_sp_port_dev_check, + mlxsw_sp_port_obj_del); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +static struct notifier_block mlxsw_sp_switchdev_blocking_notifier = { + .notifier_call = mlxsw_sp_switchdev_blocking_event, +}; + u8 mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port) { @@ -2960,6 +3475,7 @@ mlxsw_sp_bridge_port_stp_state(struct mlxsw_sp_bridge_port *bridge_port) static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) { struct mlxsw_sp_bridge *bridge = mlxsw_sp->bridge; + struct notifier_block *nb; int err; err = mlxsw_sp_ageing_set(mlxsw_sp, MLXSW_SP_DEFAULT_AGEING_TIME); @@ -2974,17 +3490,33 @@ static int mlxsw_sp_fdb_init(struct mlxsw_sp *mlxsw_sp) return err; } + nb = &mlxsw_sp_switchdev_blocking_notifier; + err = register_switchdev_blocking_notifier(nb); + if (err) { + dev_err(mlxsw_sp->bus_info->dev, "Failed to register switchdev blocking notifier\n"); + goto err_register_switchdev_blocking_notifier; + } + INIT_DELAYED_WORK(&bridge->fdb_notify.dw, mlxsw_sp_fdb_notify_work); bridge->fdb_notify.interval = MLXSW_SP_DEFAULT_LEARNING_INTERVAL; mlxsw_sp_fdb_notify_work_schedule(mlxsw_sp); return 0; + +err_register_switchdev_blocking_notifier: + unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); + return err; } static void mlxsw_sp_fdb_fini(struct mlxsw_sp *mlxsw_sp) { + struct notifier_block *nb; + cancel_delayed_work_sync(&mlxsw_sp->bridge->fdb_notify.dw); - unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); + nb = &mlxsw_sp_switchdev_blocking_notifier; + unregister_switchdev_blocking_notifier(nb); + + unregister_switchdev_notifier(&mlxsw_sp_switchdev_notifier); } int mlxsw_sp_switchdev_init(struct mlxsw_sp *mlxsw_sp) diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index c84074fa4c95..0dca2fa51dc3 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -15,6 +15,7 
@@ #include <linux/netdevice.h> #include <linux/phy.h> #include <linux/skbuff.h> +#include <linux/iopoll.h> #include <net/arp.h> #include <net/netevent.h> #include <net/rtnetlink.h> @@ -22,6 +23,9 @@ #include "ocelot.h" +#define TABLE_UPDATE_SLEEP_US 10 +#define TABLE_UPDATE_TIMEOUT_US 100000 + /* MAC table entry types. * ENTRYTYPE_NORMAL is subject to aging. * ENTRYTYPE_LOCKED is not subject to aging. @@ -41,23 +45,20 @@ struct ocelot_mact_entry { enum macaccess_entry_type type; }; -static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) +static inline u32 ocelot_mact_read_macaccess(struct ocelot *ocelot) { - unsigned int val, timeout = 10; - - /* Wait for the issued mac table command to be completed, or timeout. - * When the command read from ANA_TABLES_MACACCESS is - * MACACCESS_CMD_IDLE, the issued command completed successfully. - */ - do { - val = ocelot_read(ocelot, ANA_TABLES_MACACCESS); - val &= ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M; - } while (val != MACACCESS_CMD_IDLE && timeout--); + return ocelot_read(ocelot, ANA_TABLES_MACACCESS); +} - if (!timeout) - return -ETIMEDOUT; +static inline int ocelot_mact_wait_for_completion(struct ocelot *ocelot) +{ + u32 val; - return 0; + return readx_poll_timeout(ocelot_mact_read_macaccess, + ocelot, val, + (val & ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M) == + MACACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); } static void ocelot_mact_select(struct ocelot *ocelot, @@ -129,23 +130,21 @@ static void ocelot_mact_init(struct ocelot *ocelot) ocelot_write(ocelot, MACACCESS_CMD_INIT, ANA_TABLES_MACACCESS); } -static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) +static inline u32 ocelot_vlant_read_vlanaccess(struct ocelot *ocelot) { - unsigned int val, timeout = 10; - - /* Wait for the issued vlan table command to be completed, or timeout. - * When the command read from ANA_TABLES_VLANACCESS is - * VLANACCESS_CMD_IDLE, the issued command completed successfully. 
- */ - do { - val = ocelot_read(ocelot, ANA_TABLES_VLANACCESS); - val &= ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M; - } while (val != ANA_TABLES_VLANACCESS_CMD_IDLE && timeout--); + return ocelot_read(ocelot, ANA_TABLES_VLANACCESS); +} - if (!timeout) - return -ETIMEDOUT; +static inline int ocelot_vlant_wait_for_completion(struct ocelot *ocelot) +{ + u32 val; - return 0; + return readx_poll_timeout(ocelot_vlant_read_vlanaccess, + ocelot, + val, + (val & ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M) == + ANA_TABLES_VLANACCESS_CMD_IDLE, + TABLE_UPDATE_SLEEP_US, TABLE_UPDATE_TIMEOUT_US); } static int ocelot_vlant_set_mask(struct ocelot *ocelot, u16 vid, u32 mask) @@ -1293,7 +1292,8 @@ static int ocelot_port_obj_del_mdb(struct net_device *dev, static int ocelot_port_obj_add(struct net_device *dev, const struct switchdev_obj *obj, - struct switchdev_trans *trans) + struct switchdev_trans *trans, + struct netlink_ext_ack *extack) { int ret = 0; @@ -1337,8 +1337,6 @@ static int ocelot_port_obj_del(struct net_device *dev, static const struct switchdev_ops ocelot_port_switchdev_ops = { .switchdev_port_attr_get = ocelot_port_attr_get, .switchdev_port_attr_set = ocelot_port_attr_set, - .switchdev_port_obj_add = ocelot_port_obj_add, - .switchdev_port_obj_del = ocelot_port_obj_del, }; static int ocelot_port_bridge_join(struct ocelot_port *ocelot_port, @@ -1595,6 +1593,34 @@ struct notifier_block ocelot_netdevice_nb __read_mostly = { }; EXPORT_SYMBOL(ocelot_netdevice_nb); +static int ocelot_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + int err; + + switch (event) { + /* Blocking events. */ + case SWITCHDEV_PORT_OBJ_ADD: + err = switchdev_handle_port_obj_add(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_obj_add); + return notifier_from_errno(err); + case SWITCHDEV_PORT_OBJ_DEL: + err = switchdev_handle_port_obj_del(dev, ptr, + ocelot_netdevice_dev_check, + ocelot_port_obj_del); + return notifier_from_errno(err); + } + + return NOTIFY_DONE; +} + +struct notifier_block ocelot_switchdev_blocking_nb __read_mostly = { + .notifier_call = ocelot_switchdev_blocking_event, +}; +EXPORT_SYMBOL(ocelot_switchdev_blocking_nb); + int ocelot_probe_port(struct ocelot *ocelot, u8 port, void __iomem *regs, struct phy_device *phy) diff --git a/drivers/net/ethernet/mscc/ocelot.h b/drivers/net/ethernet/mscc/ocelot.h index 62c7c8eb00d9..086775f7b52f 100644 --- a/drivers/net/ethernet/mscc/ocelot.h +++ b/drivers/net/ethernet/mscc/ocelot.h @@ -499,5 +499,6 @@ int ocelot_probe_port(struct ocelot *ocelot, u8 port, struct phy_device *phy); extern struct notifier_block ocelot_netdevice_nb; +extern struct notifier_block ocelot_switchdev_blocking_nb; #endif diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c index 4c23d18bbf44..ca3ea2fbfcd0 100644 --- a/drivers/net/ethernet/mscc/ocelot_board.c +++ b/drivers/net/ethernet/mscc/ocelot_board.c @@ -12,6 +12,7 @@ #include <linux/of_platform.h> #include <linux/mfd/syscon.h> #include <linux/skbuff.h> +#include <net/switchdev.h> #include "ocelot.h" @@ -328,6 +329,7 @@ static int mscc_ocelot_probe(struct platform_device *pdev) } register_netdevice_notifier(&ocelot_netdevice_nb); + register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); dev_info(&pdev->dev, "Ocelot switch probed\n"); @@ -342,6 +344,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev) struct ocelot *ocelot = platform_get_drvdata(pdev); 
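The two ocelot table helpers converted above are a textbook use of readx_poll_timeout() from <linux/iopoll.h>, which centralizes the sleeping and the -ETIMEDOUT handling that the open-coded loops got subtly wrong (the timeout counter reached -1, not 0). A minimal sketch of the pattern, with hypothetical register and bit names:

	static u32 example_read_status(struct ocelot *ocelot)
	{
		return ocelot_read(ocelot, EXAMPLE_STATUS_REG);
	}

	static int example_wait_idle(struct ocelot *ocelot)
	{
		u32 val;

		/* Re-reads the status every TABLE_UPDATE_SLEEP_US microseconds
		 * until the command field reads back idle, giving up with
		 * -ETIMEDOUT after TABLE_UPDATE_TIMEOUT_US.
		 */
		return readx_poll_timeout(example_read_status, ocelot, val,
					  (val & EXAMPLE_CMD_M) == EXAMPLE_CMD_IDLE,
					  TABLE_UPDATE_SLEEP_US,
					  TABLE_UPDATE_TIMEOUT_US);
	}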
ocelot_deinit(ocelot); + unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); unregister_netdevice_notifier(&ocelot_netdevice_nb); return 0; diff --git a/drivers/net/ethernet/neterion/Kconfig b/drivers/net/ethernet/neterion/Kconfig index c26e0f70c494..7df20561e3fa 100644 --- a/drivers/net/ethernet/neterion/Kconfig +++ b/drivers/net/ethernet/neterion/Kconfig @@ -26,7 +26,7 @@ config S2IO on its age. More specific information on configuring the driver is in - <file:Documentation/networking/s2io.txt>. + <file:Documentation/networking/device_drivers/neterion/s2io.txt>. To compile this driver as a module, choose M here. The module will be called s2io. @@ -41,7 +41,7 @@ config VXGE labeled as either one, depending on its age. More specific information on configuring the driver is in - <file:Documentation/networking/vxge.txt>. + <file:Documentation/networking/device_drivers/neterion/vxge.txt>. To compile this driver as a module, choose M here. The module will be called vxge. diff --git a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c index f7a0d1d5885e..59e77e3086bb 100644 --- a/drivers/net/ethernet/neterion/vxge/vxge-traffic.c +++ b/drivers/net/ethernet/neterion/vxge/vxge-traffic.c @@ -1695,17 +1695,10 @@ exit: */ void vxge_hw_fifo_txdl_free(struct __vxge_hw_fifo *fifo, void *txdlh) { - struct __vxge_hw_fifo_txdl_priv *txdl_priv; - u32 max_frags; struct __vxge_hw_channel *channel; channel = &fifo->channel; - txdl_priv = __vxge_hw_fifo_txdl_priv(fifo, - (struct vxge_hw_fifo_txd *)txdlh); - - max_frags = fifo->config->max_frags; - vxge_hw_channel_dtr_free(channel, txdlh); } diff --git a/drivers/net/ethernet/netronome/nfp/Makefile b/drivers/net/ethernet/netronome/nfp/Makefile index 4afb10375397..47c708f08ade 100644 --- a/drivers/net/ethernet/netronome/nfp/Makefile +++ b/drivers/net/ethernet/netronome/nfp/Makefile @@ -56,7 +56,9 @@ endif ifeq ($(CONFIG_NFP_APP_ABM_NIC),y) nfp-objs += \ + abm/cls.o \ abm/ctrl.o \ + abm/qdisc.o \ abm/main.o endif diff --git a/drivers/net/ethernet/netronome/nfp/abm/cls.c b/drivers/net/ethernet/netronome/nfp/abm/cls.c new file mode 100644 index 000000000000..9852080cf454 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/cls.c @@ -0,0 +1,283 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. 
*/ + +#include <linux/bitfield.h> +#include <net/pkt_cls.h> + +#include "../nfpcore/nfp_cpp.h" +#include "../nfp_app.h" +#include "../nfp_net_repr.h" +#include "main.h" + +struct nfp_abm_u32_match { + u32 handle; + u32 band; + u8 mask; + u8 val; + struct list_head list; +}; + +static bool +nfp_abm_u32_check_knode(struct nfp_abm *abm, struct tc_cls_u32_knode *knode, + __be16 proto, struct netlink_ext_ack *extack) +{ + struct tc_u32_key *k; + unsigned int tos_off; + + if (knode->exts && tcf_exts_has_actions(knode->exts)) { + NL_SET_ERR_MSG_MOD(extack, "action offload not supported"); + return false; + } + if (knode->link_handle) { + NL_SET_ERR_MSG_MOD(extack, "linking not supported"); + return false; + } + if (knode->sel->flags != TC_U32_TERMINAL) { + NL_SET_ERR_MSG_MOD(extack, + "flags must be equal to TC_U32_TERMINAL"); + return false; + } + if (knode->sel->off || knode->sel->offshift || knode->sel->offmask || + knode->sel->offoff || knode->fshift) { + NL_SET_ERR_MSG_MOD(extack, "variable offsetting not supported"); + return false; + } + if (knode->sel->hoff || knode->sel->hmask) { + NL_SET_ERR_MSG_MOD(extack, "hashing not supported"); + return false; + } + if (knode->val || knode->mask) { + NL_SET_ERR_MSG_MOD(extack, "matching on mark not supported"); + return false; + } + if (knode->res && knode->res->class) { + NL_SET_ERR_MSG_MOD(extack, "setting non-0 class not supported"); + return false; + } + if (knode->res && knode->res->classid >= abm->num_bands) { + NL_SET_ERR_MSG_MOD(extack, + "classid higher than number of bands"); + return false; + } + if (knode->sel->nkeys != 1) { + NL_SET_ERR_MSG_MOD(extack, "exactly one key required"); + return false; + } + + switch (proto) { + case htons(ETH_P_IP): + tos_off = 16; + break; + case htons(ETH_P_IPV6): + tos_off = 20; + break; + default: + NL_SET_ERR_MSG_MOD(extack, "only IP and IPv6 supported as filter protocol"); + return false; + } + + k = &knode->sel->keys[0]; + if (k->offmask) { + NL_SET_ERR_MSG_MOD(extack, "offset mask - variable offsetting not supported"); + return false; + } + if (k->off) { + NL_SET_ERR_MSG_MOD(extack, "only DSCP fields can be matched"); + return false; + } + if (k->val & ~k->mask) { + NL_SET_ERR_MSG_MOD(extack, "mask does not cover the key"); + return false; + } + if (be32_to_cpu(k->mask) >> tos_off & ~abm->dscp_mask) { + NL_SET_ERR_MSG_MOD(extack, "only high DSCP class selector bits can be used"); + nfp_err(abm->app->cpp, + "u32 offload: requested mask %x FW can support only %x\n", + be32_to_cpu(k->mask) >> tos_off, abm->dscp_mask); + return false; + } + + return true; +} +
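The tos_off values chosen above follow from the layout of the first 32-bit word of each L3 header; a worked example (illustrative only, not part of the patch):

	/* IPv4 word 0: version+IHL (8) | TOS (8)           | total length (16)
	 *   be32_to_cpu(word) >> 16 & 0xff  ->  the TOS byte
	 * IPv6 word 0: version (4)     | traffic class (8) | flow label (20)
	 *   be32_to_cpu(word) >> 20 & 0xff  ->  the traffic class
	 *
	 * E.g. an IPv4 key with val 0x00400000 and mask 0x00e00000 matches TOS
	 * 0x40 under mask 0xe0, i.e. DSCP class selector CS2.
	 */

+/* This filter list -> map conversion is O(n * m), we expect single digit or + * low double digit number of prios and likewise for the filters. Also u32 + * doesn't report stats, so it's really only setup time cost.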
+ */ +static unsigned int +nfp_abm_find_band_for_prio(struct nfp_abm_link *alink, unsigned int prio) +{ + struct nfp_abm_u32_match *iter; + + list_for_each_entry(iter, &alink->dscp_map, list) + if ((prio & iter->mask) == iter->val) + return iter->band; + + return alink->def_band; +} + +static int nfp_abm_update_band_map(struct nfp_abm_link *alink) +{ + unsigned int i, bits_per_prio, prios_per_word, base_shift; + struct nfp_abm *abm = alink->abm; + u32 field_mask; + + alink->has_prio = !list_empty(&alink->dscp_map); + + bits_per_prio = roundup_pow_of_two(order_base_2(abm->num_bands)); + field_mask = (1 << bits_per_prio) - 1; + prios_per_word = sizeof(u32) * BITS_PER_BYTE / bits_per_prio; + + /* FW mask applies from top bits */ + base_shift = 8 - order_base_2(abm->num_prios); + + for (i = 0; i < abm->num_prios; i++) { + unsigned int offset; + u32 *word; + u8 band; + + word = &alink->prio_map[i / prios_per_word]; + offset = (i % prios_per_word) * bits_per_prio; + + band = nfp_abm_find_band_for_prio(alink, i << base_shift); + + *word &= ~(field_mask << offset); + *word |= band << offset; + } + + /* Qdisc offload status may change if has_prio changed */ + nfp_abm_qdisc_offload_update(alink); + + return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map); +} + +static void +nfp_abm_u32_knode_delete(struct nfp_abm_link *alink, + struct tc_cls_u32_knode *knode) +{ + struct nfp_abm_u32_match *iter; + + list_for_each_entry(iter, &alink->dscp_map, list) + if (iter->handle == knode->handle) { + list_del(&iter->list); + kfree(iter); + nfp_abm_update_band_map(alink); + return; + } +} + +static int +nfp_abm_u32_knode_replace(struct nfp_abm_link *alink, + struct tc_cls_u32_knode *knode, + __be16 proto, struct netlink_ext_ack *extack) +{ + struct nfp_abm_u32_match *match = NULL, *iter; + unsigned int tos_off; + u8 mask, val; + int err; + + if (!nfp_abm_u32_check_knode(alink->abm, knode, proto, extack)) + goto err_delete; + + tos_off = proto == htons(ETH_P_IP) ? 
16 : 20; + + /* Extract the DSCP Class Selector bits */ + val = be32_to_cpu(knode->sel->keys[0].val) >> tos_off & 0xff; + mask = be32_to_cpu(knode->sel->keys[0].mask) >> tos_off & 0xff; + + /* Check if there is no conflicting mapping and find match by handle */ + list_for_each_entry(iter, &alink->dscp_map, list) { + u32 cmask; + + if (iter->handle == knode->handle) { + match = iter; + continue; + } + + cmask = iter->mask & mask; + if ((iter->val & cmask) == (val & cmask) && + iter->band != knode->res->classid) { + NL_SET_ERR_MSG_MOD(extack, "conflict with already offloaded filter"); + goto err_delete; + } + } + + if (!match) { + match = kzalloc(sizeof(*match), GFP_KERNEL); + if (!match) + return -ENOMEM; + list_add(&match->list, &alink->dscp_map); + } + match->handle = knode->handle; + match->band = knode->res->classid; + match->mask = mask; + match->val = val; + + err = nfp_abm_update_band_map(alink); + if (err) + goto err_delete; + + return 0; + +err_delete: + nfp_abm_u32_knode_delete(alink, knode); + return -EOPNOTSUPP; +} + +static int nfp_abm_setup_tc_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct tc_cls_u32_offload *cls_u32 = type_data; + struct nfp_repr *repr = cb_priv; + struct nfp_abm_link *alink; + + alink = repr->app_priv; + + if (type != TC_SETUP_CLSU32) { + NL_SET_ERR_MSG_MOD(cls_u32->common.extack, + "only offload of u32 classifier supported"); + return -EOPNOTSUPP; + } + if (!tc_cls_can_offload_and_chain0(repr->netdev, &cls_u32->common)) + return -EOPNOTSUPP; + + if (cls_u32->common.protocol != htons(ETH_P_IP) && + cls_u32->common.protocol != htons(ETH_P_IPV6)) { + NL_SET_ERR_MSG_MOD(cls_u32->common.extack, + "only IP and IPv6 supported as filter protocol"); + return -EOPNOTSUPP; + } + + switch (cls_u32->command) { + case TC_CLSU32_NEW_KNODE: + case TC_CLSU32_REPLACE_KNODE: + return nfp_abm_u32_knode_replace(alink, &cls_u32->knode, + cls_u32->common.protocol, + cls_u32->common.extack); + case TC_CLSU32_DELETE_KNODE: + nfp_abm_u32_knode_delete(alink, &cls_u32->knode); + return 0; + default: + return -EOPNOTSUPP; + } +} + +int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr, + struct tc_block_offload *f) +{ + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_EGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + return tcf_block_cb_register(f->block, + nfp_abm_setup_tc_block_cb, + repr, repr, f->extack); + case TC_BLOCK_UNBIND: + tcf_block_cb_unregister(f->block, nfp_abm_setup_tc_block_cb, + repr); + return 0; + default: + return -EOPNOTSUPP; + } +} diff --git a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c index 3c661f422688..9584f03f3efa 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/abm/ctrl.c @@ -1,7 +1,9 @@ // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2018 Netronome Systems, Inc. 
*/ +#include <linux/bitops.h> #include <linux/kernel.h> +#include <linux/log2.h> #include "../nfpcore/nfp_cpp.h" #include "../nfpcore/nfp_nffw.h" @@ -11,38 +13,58 @@ #include "../nfp_net.h" #include "main.h" -#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u" +#define NFP_NUM_PRIOS_SYM_NAME "_abi_pci_dscp_num_prio_%u" +#define NFP_NUM_BANDS_SYM_NAME "_abi_pci_dscp_num_band_%u" +#define NFP_ACT_MASK_SYM_NAME "_abi_nfd_out_q_actions_%u" + +#define NFP_RED_SUPPORT_SYM_NAME "_abi_nfd_out_red_offload_%u" + +#define NFP_QLVL_SYM_NAME "_abi_nfd_out_q_lvls_%u%s" #define NFP_QLVL_STRIDE 16 #define NFP_QLVL_BLOG_BYTES 0 #define NFP_QLVL_BLOG_PKTS 4 #define NFP_QLVL_THRS 8 +#define NFP_QLVL_ACT 12 -#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats" +#define NFP_QMSTAT_SYM_NAME "_abi_nfdqm%u_stats%s" #define NFP_QMSTAT_STRIDE 32 #define NFP_QMSTAT_NON_STO 0 #define NFP_QMSTAT_STO 8 #define NFP_QMSTAT_DROP 16 #define NFP_QMSTAT_ECN 24 +#define NFP_Q_STAT_SYM_NAME "_abi_nfd_rxq_stats%u%s" +#define NFP_Q_STAT_STRIDE 16 +#define NFP_Q_STAT_PKTS 0 +#define NFP_Q_STAT_BYTES 8 + +#define NFP_NET_ABM_MBOX_CMD NFP_NET_CFG_MBOX_SIMPLE_CMD +#define NFP_NET_ABM_MBOX_RET NFP_NET_CFG_MBOX_SIMPLE_RET +#define NFP_NET_ABM_MBOX_DATALEN NFP_NET_CFG_MBOX_SIMPLE_VAL +#define NFP_NET_ABM_MBOX_RESERVED (NFP_NET_CFG_MBOX_SIMPLE_VAL + 4) +#define NFP_NET_ABM_MBOX_DATA (NFP_NET_CFG_MBOX_SIMPLE_VAL + 8) + static int nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, - unsigned int stride, unsigned int offset, unsigned int i, - bool is_u64, u64 *res) + unsigned int stride, unsigned int offset, unsigned int band, + unsigned int queue, bool is_u64, u64 *res) { struct nfp_cpp *cpp = alink->abm->app->cpp; u64 val, sym_offset; + unsigned int qid; u32 val32; int err; - sym_offset = (alink->queue_base + i) * stride + offset; + qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue; + + sym_offset = qid * stride + offset; if (is_u64) err = __nfp_rtsym_readq(cpp, sym, 3, 0, sym_offset, &val); else err = __nfp_rtsym_readl(cpp, sym, 3, 0, sym_offset, &val32); if (err) { - nfp_err(cpp, - "RED offload reading stat failed on vNIC %d queue %d\n", - alink->id, i); + nfp_err(cpp, "RED offload reading stat failed on vNIC %d band %d queue %d (+ %d)\n", + alink->id, band, queue, alink->queue_base); return err; } @@ -50,175 +72,179 @@ nfp_abm_ctrl_stat(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, return 0; } -static int -nfp_abm_ctrl_stat_all(struct nfp_abm_link *alink, const struct nfp_rtsym *sym, - unsigned int stride, unsigned int offset, bool is_u64, - u64 *res) +int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val) { - u64 val, sum = 0; - unsigned int i; + struct nfp_cpp *cpp = abm->app->cpp; + u64 sym_offset; int err; - for (i = 0; i < alink->vnic->max_rx_rings; i++) { - err = nfp_abm_ctrl_stat(alink, sym, stride, offset, i, - is_u64, &val); - if (err) - return err; - sum += val; + __clear_bit(id, abm->threshold_undef); + if (abm->thresholds[id] == val) + return 0; + + sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_THRS; + err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, val); + if (err) { + nfp_err(cpp, + "RED offload setting level failed on subqueue %d\n", + id); + return err; } - *res = sum; + abm->thresholds[id] = val; return 0; } -int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, u32 val) +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, u32 val) { - struct nfp_cpp *cpp = alink->abm->app->cpp; + 
unsigned int threshold; + + threshold = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue; + + return __nfp_abm_ctrl_set_q_lvl(alink->abm, threshold, val); +} + +int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id, + enum nfp_abm_q_action act) +{ + struct nfp_cpp *cpp = abm->app->cpp; u64 sym_offset; int err; - sym_offset = (alink->queue_base + i) * NFP_QLVL_STRIDE + NFP_QLVL_THRS; - err = __nfp_rtsym_writel(cpp, alink->abm->q_lvls, 4, 0, - sym_offset, val); + if (abm->actions[id] == act) + return 0; + + sym_offset = id * NFP_QLVL_STRIDE + NFP_QLVL_ACT; + err = __nfp_rtsym_writel(cpp, abm->q_lvls, 4, 0, sym_offset, act); if (err) { - nfp_err(cpp, "RED offload setting level failed on vNIC %d queue %d\n", - alink->id, i); + nfp_err(cpp, + "RED offload setting action failed on subqueue %d\n", + id); return err; } + abm->actions[id] = act; return 0; } -int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val) +int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, enum nfp_abm_q_action act) +{ + unsigned int qid; + + qid = band * NFP_NET_MAX_RX_RINGS + alink->queue_base + queue; + + return __nfp_abm_ctrl_set_q_act(alink->abm, qid, act); +} + +u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int queue) { - int i, err; + unsigned int band; + u64 val, sum = 0; - for (i = 0; i < alink->vnic->max_rx_rings; i++) { - err = nfp_abm_ctrl_set_q_lvl(alink, i, val); - if (err) - return err; + for (band = 0; band < alink->abm->num_bands; band++) { + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_NON_STO, + band, queue, true, &val)) + return 0; + sum += val; } - return 0; + return sum; } -u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i) +u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int queue) { - u64 val; + unsigned int band; + u64 val, sum = 0; - if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, - NFP_QMSTAT_NON_STO, i, true, &val)) - return 0; - return val; + for (band = 0; band < alink->abm->num_bands; band++) { + if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, + NFP_QMSTAT_STRIDE, NFP_QMSTAT_STO, + band, queue, true, &val)) + return 0; + sum += val; + } + + return sum; } -u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i) +static int +nfp_abm_ctrl_stat_basic(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, unsigned int off, u64 *val) { - u64 val; + if (!nfp_abm_has_prio(alink->abm)) { + if (!band) { + unsigned int id = alink->queue_base + queue; + + *val = nn_readq(alink->vnic, + NFP_NET_CFG_RXR_STATS(id) + off); + } else { + *val = 0; + } - if (nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, - NFP_QMSTAT_STO, i, true, &val)) return 0; - return val; + } else { + return nfp_abm_ctrl_stat(alink, alink->abm->q_stats, + NFP_Q_STAT_STRIDE, off, band, queue, + true, val); + } } -int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, - struct nfp_alink_stats *stats) +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, struct nfp_alink_stats *stats) { int err; - stats->tx_pkts = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); - stats->tx_bytes = nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); + err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_PKTS, + &stats->tx_pkts); + if (err) + return err; - err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, - NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, 
- i, false, &stats->backlog_bytes); + err = nfp_abm_ctrl_stat_basic(alink, band, queue, NFP_Q_STAT_BYTES, + &stats->tx_bytes); + if (err) + return err; + + err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE, + NFP_QLVL_BLOG_BYTES, band, queue, false, + &stats->backlog_bytes); if (err) return err; err = nfp_abm_ctrl_stat(alink, alink->abm->q_lvls, NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, - i, false, &stats->backlog_pkts); + band, queue, false, &stats->backlog_pkts); if (err) return err; err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, - i, true, &stats->drops); + band, queue, true, &stats->drops); if (err) return err; return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, - i, true, &stats->overlimits); + band, queue, true, &stats->overlimits); } -int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, - struct nfp_alink_stats *stats) -{ - u64 pkts = 0, bytes = 0; - int i, err; - - for (i = 0; i < alink->vnic->max_rx_rings; i++) { - pkts += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i)); - bytes += nn_readq(alink->vnic, NFP_NET_CFG_RXR_STATS(i) + 8); - } - stats->tx_pkts = pkts; - stats->tx_bytes = bytes; - - err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, - NFP_QLVL_STRIDE, NFP_QLVL_BLOG_BYTES, - false, &stats->backlog_bytes); - if (err) - return err; - - err = nfp_abm_ctrl_stat_all(alink, alink->abm->q_lvls, - NFP_QLVL_STRIDE, NFP_QLVL_BLOG_PKTS, - false, &stats->backlog_pkts); - if (err) - return err; - - err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, - NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, - true, &stats->drops); - if (err) - return err; - - return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, - NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, - true, &stats->overlimits); -} - -int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, + unsigned int band, unsigned int queue, struct nfp_alink_xstats *xstats) { int err; err = nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, - i, true, &xstats->pdrop); + band, queue, true, &xstats->pdrop); if (err) return err; return nfp_abm_ctrl_stat(alink, alink->abm->qm_stats, NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, - i, true, &xstats->ecn_marked); -} - -int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, - struct nfp_alink_xstats *xstats) -{ - int err; - - err = nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, - NFP_QMSTAT_STRIDE, NFP_QMSTAT_DROP, - true, &xstats->pdrop); - if (err) - return err; - - return nfp_abm_ctrl_stat_all(alink, alink->abm->qm_stats, - NFP_QMSTAT_STRIDE, NFP_QMSTAT_ECN, - true, &xstats->ecn_marked); + band, queue, true, &xstats->ecn_marked); } int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm) @@ -233,10 +259,64 @@ int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm) NULL, 0, NULL, 0); } -void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) +int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed) +{ + struct nfp_net *nn = alink->vnic; + unsigned int i; + int err; + + /* Write data_len and wipe reserved */ + nn_writeq(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATALEN, + alink->abm->prio_map_len); + + for (i = 0; i < alink->abm->prio_map_len; i += sizeof(u32)) + nn_writel(nn, nn->tlv_caps.mbox_off + NFP_NET_ABM_MBOX_DATA + i, + packed[i / sizeof(u32)]); + + err = nfp_net_reconfig_mbox(nn, + NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET); + if (err) + nfp_err(alink->abm->app->cpp, + "setting DSCP -> VQ map failed 
with error %d\n", err); + return err; +} + +static int nfp_abm_ctrl_prio_check_params(struct nfp_abm_link *alink) +{ + struct nfp_abm *abm = alink->abm; + struct nfp_net *nn = alink->vnic; + unsigned int min_mbox_sz; + + if (!nfp_abm_has_prio(alink->abm)) + return 0; + + min_mbox_sz = NFP_NET_ABM_MBOX_DATA + alink->abm->prio_map_len; + if (nn->tlv_caps.mbox_len < min_mbox_sz) { + nfp_err(abm->app->pf->cpp, "vNIC mailbox too small for prio offload: %u, need: %u\n", + nn->tlv_caps.mbox_len, min_mbox_sz); + return -EINVAL; + } + + return 0; +} + +int nfp_abm_ctrl_read_params(struct nfp_abm_link *alink) { alink->queue_base = nn_readl(alink->vnic, NFP_NET_CFG_START_RXQ); alink->queue_base /= alink->vnic->stride_rx; + + return nfp_abm_ctrl_prio_check_params(alink); +} + +static unsigned int nfp_abm_ctrl_prio_map_size(struct nfp_abm *abm) +{ + unsigned int size; + + size = roundup_pow_of_two(order_base_2(abm->num_bands)); + size = DIV_ROUND_UP(size * abm->num_prios, BITS_PER_BYTE); + size = round_up(size, sizeof(u32)); + + return size; } static const struct nfp_rtsym * @@ -260,33 +340,86 @@ nfp_abm_ctrl_find_rtsym(struct nfp_pf *pf, const char *name, unsigned int size) } static const struct nfp_rtsym * -nfp_abm_ctrl_find_q_rtsym(struct nfp_pf *pf, const char *name, - unsigned int size) +nfp_abm_ctrl_find_q_rtsym(struct nfp_abm *abm, const char *name_fmt, + size_t size) { - return nfp_abm_ctrl_find_rtsym(pf, name, size * NFP_NET_MAX_RX_RINGS); + char pf_symbol[64]; + + size = array3_size(size, abm->num_bands, NFP_NET_MAX_RX_RINGS); + snprintf(pf_symbol, sizeof(pf_symbol), name_fmt, + abm->pf_id, nfp_abm_has_prio(abm) ? "_per_band" : ""); + + return nfp_abm_ctrl_find_rtsym(abm->app->pf, pf_symbol, size); } int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm) { struct nfp_pf *pf = abm->app->pf; const struct nfp_rtsym *sym; - unsigned int pf_id; - char pf_symbol[64]; + int res; + + abm->pf_id = nfp_cppcore_pcie_unit(pf->cpp); + + /* Check if Qdisc offloads are supported */ + res = nfp_pf_rtsym_read_optional(pf, NFP_RED_SUPPORT_SYM_NAME, 1); + if (res < 0) + return res; + abm->red_support = res; + + /* Read count of prios and prio bands */ + res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_BANDS_SYM_NAME, 1); + if (res < 0) + return res; + abm->num_bands = res; + + res = nfp_pf_rtsym_read_optional(pf, NFP_NUM_PRIOS_SYM_NAME, 1); + if (res < 0) + return res; + abm->num_prios = res; + + /* Read available actions */ + res = nfp_pf_rtsym_read_optional(pf, NFP_ACT_MASK_SYM_NAME, + BIT(NFP_ABM_ACT_MARK_DROP)); + if (res < 0) + return res; + abm->action_mask = res; + + abm->prio_map_len = nfp_abm_ctrl_prio_map_size(abm); + abm->dscp_mask = GENMASK(7, 8 - order_base_2(abm->num_prios)); + + /* Check values are sane, U16_MAX is arbitrarily chosen as max */ + if (!is_power_of_2(abm->num_bands) || !is_power_of_2(abm->num_prios) || + abm->num_bands > U16_MAX || abm->num_prios > U16_MAX || + (abm->num_bands == 1) != (abm->num_prios == 1)) { + nfp_err(pf->cpp, + "invalid priomap description num bands: %u and num prios: %u\n", + abm->num_bands, abm->num_prios); + return -EINVAL; + } - pf_id = nfp_cppcore_pcie_unit(pf->cpp); - abm->pf_id = pf_id; + /* Find level and stat symbols */ + if (!abm->red_support) + return 0; - snprintf(pf_symbol, sizeof(pf_symbol), NFP_QLVL_SYM_NAME, pf_id); - sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QLVL_STRIDE); + sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QLVL_SYM_NAME, + NFP_QLVL_STRIDE); if (IS_ERR(sym)) return PTR_ERR(sym); abm->q_lvls = sym; - snprintf(pf_symbol, sizeof(pf_symbol), 
NFP_QMSTAT_SYM_NAME, pf_id); - sym = nfp_abm_ctrl_find_q_rtsym(pf, pf_symbol, NFP_QMSTAT_STRIDE); + sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_QMSTAT_SYM_NAME, + NFP_QMSTAT_STRIDE); if (IS_ERR(sym)) return PTR_ERR(sym); abm->qm_stats = sym; + if (nfp_abm_has_prio(abm)) { + sym = nfp_abm_ctrl_find_q_rtsym(abm, NFP_Q_STAT_SYM_NAME, + NFP_Q_STAT_STRIDE); + if (IS_ERR(sym)) + return PTR_ERR(sym); + abm->q_stats = sym; + } + return 0; } diff --git a/drivers/net/ethernet/netronome/nfp/abm/main.c b/drivers/net/ethernet/netronome/nfp/abm/main.c index c0830c0c2c3f..4d4ff5844c47 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.c +++ b/drivers/net/ethernet/netronome/nfp/abm/main.c @@ -2,14 +2,13 @@ /* Copyright (C) 2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> +#include <linux/bitmap.h> #include <linux/etherdevice.h> #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/rcupdate.h> +#include <linux/rtnetlink.h> #include <linux/slab.h> -#include <net/pkt_cls.h> -#include <net/pkt_sched.h> -#include <net/red.h> #include "../nfpcore/nfp.h" #include "../nfpcore/nfp_cpp.h" @@ -28,269 +27,6 @@ static u32 nfp_abm_portid(enum nfp_repr_type rtype, unsigned int id) } static int -__nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, - u32 handle, unsigned int qs, u32 init_val) -{ - struct nfp_port *port = nfp_port_from_netdev(netdev); - int ret; - - ret = nfp_abm_ctrl_set_all_q_lvls(alink, init_val); - memset(alink->qdiscs, 0, sizeof(*alink->qdiscs) * alink->num_qdiscs); - - alink->parent = handle; - alink->num_qdiscs = qs; - port->tc_offload_cnt = qs; - - return ret; -} - -static void -nfp_abm_reset_root(struct net_device *netdev, struct nfp_abm_link *alink, - u32 handle, unsigned int qs) -{ - __nfp_abm_reset_root(netdev, alink, handle, qs, ~0); -} - -static int -nfp_abm_red_find(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) -{ - unsigned int i = TC_H_MIN(opt->parent) - 1; - - if (opt->parent == TC_H_ROOT) - i = 0; - else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) - i = TC_H_MIN(opt->parent) - 1; - else - return -EOPNOTSUPP; - - if (i >= alink->num_qdiscs || opt->handle != alink->qdiscs[i].handle) - return -EOPNOTSUPP; - - return i; -} - -static void -nfp_abm_red_destroy(struct net_device *netdev, struct nfp_abm_link *alink, - u32 handle) -{ - unsigned int i; - - for (i = 0; i < alink->num_qdiscs; i++) - if (handle == alink->qdiscs[i].handle) - break; - if (i == alink->num_qdiscs) - return; - - if (alink->parent == TC_H_ROOT) { - nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); - } else { - nfp_abm_ctrl_set_q_lvl(alink, i, ~0); - memset(&alink->qdiscs[i], 0, sizeof(*alink->qdiscs)); - } -} - -static int -nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, - struct tc_red_qopt_offload *opt) -{ - bool existing; - int i, err; - - i = nfp_abm_red_find(alink, opt); - existing = i >= 0; - - if (opt->set.min != opt->set.max || !opt->set.is_ecn) { - nfp_warn(alink->abm->app->cpp, - "RED offload failed - unsupported parameters\n"); - err = -EINVAL; - goto err_destroy; - } - - if (existing) { - if (alink->parent == TC_H_ROOT) - err = nfp_abm_ctrl_set_all_q_lvls(alink, opt->set.min); - else - err = nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); - if (err) - goto err_destroy; - return 0; - } - - if (opt->parent == TC_H_ROOT) { - i = 0; - err = __nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 1, - opt->set.min); - } else if (TC_H_MAJ(alink->parent) == TC_H_MAJ(opt->parent)) { - i = TC_H_MIN(opt->parent) - 1; - err = 
nfp_abm_ctrl_set_q_lvl(alink, i, opt->set.min); - } else { - return -EINVAL; - } - /* Set the handle to try full clean up, in case IO failed */ - alink->qdiscs[i].handle = opt->handle; - if (err) - goto err_destroy; - - if (opt->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_stats(alink, &alink->qdiscs[i].stats); - else - err = nfp_abm_ctrl_read_q_stats(alink, i, - &alink->qdiscs[i].stats); - if (err) - goto err_destroy; - - if (opt->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_xstats(alink, - &alink->qdiscs[i].xstats); - else - err = nfp_abm_ctrl_read_q_xstats(alink, i, - &alink->qdiscs[i].xstats); - if (err) - goto err_destroy; - - alink->qdiscs[i].stats.backlog_pkts = 0; - alink->qdiscs[i].stats.backlog_bytes = 0; - - return 0; -err_destroy: - /* If the qdisc keeps on living, but we can't offload undo changes */ - if (existing) { - opt->set.qstats->qlen -= alink->qdiscs[i].stats.backlog_pkts; - opt->set.qstats->backlog -= - alink->qdiscs[i].stats.backlog_bytes; - } - nfp_abm_red_destroy(netdev, alink, opt->handle); - - return err; -} - -static void -nfp_abm_update_stats(struct nfp_alink_stats *new, struct nfp_alink_stats *old, - struct tc_qopt_offload_stats *stats) -{ - _bstats_update(stats->bstats, new->tx_bytes - old->tx_bytes, - new->tx_pkts - old->tx_pkts); - stats->qstats->qlen += new->backlog_pkts - old->backlog_pkts; - stats->qstats->backlog += new->backlog_bytes - old->backlog_bytes; - stats->qstats->overlimits += new->overlimits - old->overlimits; - stats->qstats->drops += new->drops - old->drops; -} - -static int -nfp_abm_red_stats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) -{ - struct nfp_alink_stats *prev_stats; - struct nfp_alink_stats stats; - int i, err; - - i = nfp_abm_red_find(alink, opt); - if (i < 0) - return i; - prev_stats = &alink->qdiscs[i].stats; - - if (alink->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_stats(alink, &stats); - else - err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); - if (err) - return err; - - nfp_abm_update_stats(&stats, prev_stats, &opt->stats); - - *prev_stats = stats; - - return 0; -} - -static int -nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) -{ - struct nfp_alink_xstats *prev_xstats; - struct nfp_alink_xstats xstats; - int i, err; - - i = nfp_abm_red_find(alink, opt); - if (i < 0) - return i; - prev_xstats = &alink->qdiscs[i].xstats; - - if (alink->parent == TC_H_ROOT) - err = nfp_abm_ctrl_read_xstats(alink, &xstats); - else - err = nfp_abm_ctrl_read_q_xstats(alink, i, &xstats); - if (err) - return err; - - opt->xstats->forced_mark += xstats.ecn_marked - prev_xstats->ecn_marked; - opt->xstats->pdrop += xstats.pdrop - prev_xstats->pdrop; - - *prev_xstats = xstats; - - return 0; -} - -static int -nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, - struct tc_red_qopt_offload *opt) -{ - switch (opt->command) { - case TC_RED_REPLACE: - return nfp_abm_red_replace(netdev, alink, opt); - case TC_RED_DESTROY: - nfp_abm_red_destroy(netdev, alink, opt->handle); - return 0; - case TC_RED_STATS: - return nfp_abm_red_stats(alink, opt); - case TC_RED_XSTATS: - return nfp_abm_red_xstats(alink, opt); - default: - return -EOPNOTSUPP; - } -} - -static int -nfp_abm_mq_stats(struct nfp_abm_link *alink, struct tc_mq_qopt_offload *opt) -{ - struct nfp_alink_stats stats; - unsigned int i; - int err; - - for (i = 0; i < alink->num_qdiscs; i++) { - if (alink->qdiscs[i].handle == TC_H_UNSPEC) - continue; - - err = nfp_abm_ctrl_read_q_stats(alink, i, &stats); - if (err) - return 
err; - - nfp_abm_update_stats(&stats, &alink->qdiscs[i].stats, - &opt->stats); - } - - return 0; -} - -static int -nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, - struct tc_mq_qopt_offload *opt) -{ - switch (opt->command) { - case TC_MQ_CREATE: - nfp_abm_reset_root(netdev, alink, opt->handle, - alink->total_queues); - return 0; - case TC_MQ_DESTROY: - if (opt->handle == alink->parent) - nfp_abm_reset_root(netdev, alink, TC_H_ROOT, 0); - return 0; - case TC_MQ_STATS: - return nfp_abm_mq_stats(alink, opt); - default: - return -EOPNOTSUPP; - } -} - -static int nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data) { @@ -302,10 +38,16 @@ nfp_abm_setup_tc(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; switch (type) { + case TC_SETUP_ROOT_QDISC: + return nfp_abm_setup_root(netdev, repr->app_priv, type_data); case TC_SETUP_QDISC_MQ: return nfp_abm_setup_tc_mq(netdev, repr->app_priv, type_data); case TC_SETUP_QDISC_RED: return nfp_abm_setup_tc_red(netdev, repr->app_priv, type_data); + case TC_SETUP_QDISC_GRED: + return nfp_abm_setup_tc_gred(netdev, repr->app_priv, type_data); + case TC_SETUP_BLOCK: + return nfp_abm_setup_cls_block(netdev, repr, type_data); default: return -EOPNOTSUPP; } @@ -384,7 +126,9 @@ nfp_abm_spawn_repr(struct nfp_app *app, struct nfp_abm_link *alink, reprs = nfp_reprs_get_locked(app, rtype); WARN(nfp_repr_get_locked(app, reprs, alink->id), "duplicate repr"); + rtnl_lock(); rcu_assign_pointer(reprs->reprs[alink->id], netdev); + rtnl_unlock(); nfp_info(app->cpp, "%s Port %d Representor(%s) created\n", ptype == NFP_PORT_PF_PORT ? "PCIe" : "Phys", @@ -410,7 +154,9 @@ nfp_abm_kill_repr(struct nfp_app *app, struct nfp_abm_link *alink, netdev = nfp_repr_get_locked(app, reprs, alink->id); if (!netdev) return; + rtnl_lock(); rcu_assign_pointer(reprs->reprs[alink->id], NULL); + rtnl_unlock(); synchronize_rcu(); /* Cast to make sure nfp_repr_clean_and_free() takes a nfp_repr */ nfp_repr_clean_and_free((struct nfp_repr *)netdev_priv(netdev)); @@ -461,6 +207,9 @@ static int nfp_abm_eswitch_set_switchdev(struct nfp_abm *abm) struct nfp_net *nn; int err; + if (!abm->red_support) + return -EOPNOTSUPP; + err = nfp_abm_ctrl_qm_enable(abm); if (err) return err; @@ -573,31 +322,34 @@ nfp_abm_vnic_alloc(struct nfp_app *app, struct nfp_net *nn, unsigned int id) alink->abm = abm; alink->vnic = nn; alink->id = id; - alink->parent = TC_H_ROOT; alink->total_queues = alink->vnic->max_rx_rings; - alink->qdiscs = kvcalloc(alink->total_queues, sizeof(*alink->qdiscs), - GFP_KERNEL); - if (!alink->qdiscs) { - err = -ENOMEM; + + INIT_LIST_HEAD(&alink->dscp_map); + + err = nfp_abm_ctrl_read_params(alink); + if (err) + goto err_free_alink; + + alink->prio_map = kzalloc(abm->prio_map_len, GFP_KERNEL); + if (!alink->prio_map) goto err_free_alink; - } /* This is a multi-host app, make sure MAC/PHY is up, but don't * make the MAC/PHY state follow the state of any of the ports. 
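
The unwind ladder in nfp_abm_vnic_alloc() above follows the kernel's usual goto-cleanup idiom: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A minimal sketch of the shape in plain C; struct link, link_alloc() and the sizes are illustrative names, not driver API:

#include <stdlib.h>

struct link {
	int *prio_map;
};

static int link_alloc(struct link **out)
{
	struct link *l;
	int err;

	l = calloc(1, sizeof(*l));
	if (!l)
		return -1;

	err = -1;
	l->prio_map = calloc(64, sizeof(*l->prio_map));
	if (!l->prio_map)
		goto err_free_link;	/* release in reverse order of acquisition */

	*out = l;
	return 0;

err_free_link:
	free(l);
	return err;
}
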
*/ err = nfp_eth_set_configured(app->cpp, eth_port->index, true); if (err < 0) - goto err_free_qdiscs; + goto err_free_priomap; netif_keep_dst(nn->dp.netdev); nfp_abm_vnic_set_mac(app->pf, abm, nn, id); - nfp_abm_ctrl_read_params(alink); + INIT_RADIX_TREE(&alink->qdiscs, GFP_KERNEL); return 0; -err_free_qdiscs: - kvfree(alink->qdiscs); +err_free_priomap: + kfree(alink->prio_map); err_free_alink: kfree(alink); return err; @@ -608,10 +360,20 @@ static void nfp_abm_vnic_free(struct nfp_app *app, struct nfp_net *nn) struct nfp_abm_link *alink = nn->app_priv; nfp_abm_kill_reprs(alink->abm, alink); - kvfree(alink->qdiscs); + WARN(!radix_tree_empty(&alink->qdiscs), "left over qdiscs\n"); + kfree(alink->prio_map); kfree(alink); } +static int nfp_abm_vnic_init(struct nfp_app *app, struct nfp_net *nn) +{ + struct nfp_abm_link *alink = nn->app_priv; + + if (nfp_abm_has_prio(alink->abm)) + return nfp_abm_ctrl_prio_map_update(alink, alink->prio_map); + return 0; +} + static u64 * nfp_abm_port_get_stats(struct nfp_app *app, struct nfp_port *port, u64 *data) { @@ -659,6 +421,21 @@ nfp_abm_port_get_stats_strings(struct nfp_app *app, struct nfp_port *port, return data; } +static int nfp_abm_fw_init_reset(struct nfp_abm *abm) +{ + unsigned int i; + + if (!abm->red_support) + return 0; + + for (i = 0; i < abm->num_bands * NFP_NET_MAX_RX_RINGS; i++) { + __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY); + __nfp_abm_ctrl_set_q_act(abm, i, NFP_ABM_ACT_DROP); + } + + return nfp_abm_ctrl_qm_disable(abm); +} + static int nfp_abm_init(struct nfp_app *app) { struct nfp_pf *pf = app->pf; @@ -690,15 +467,31 @@ static int nfp_abm_init(struct nfp_app *app) if (err) goto err_free_abm; + err = -ENOMEM; + abm->num_thresholds = array_size(abm->num_bands, NFP_NET_MAX_RX_RINGS); + abm->threshold_undef = bitmap_zalloc(abm->num_thresholds, GFP_KERNEL); + if (!abm->threshold_undef) + goto err_free_abm; + + abm->thresholds = kvcalloc(abm->num_thresholds, + sizeof(*abm->thresholds), GFP_KERNEL); + if (!abm->thresholds) + goto err_free_thresh_umap; + + abm->actions = kvcalloc(abm->num_thresholds, sizeof(*abm->actions), + GFP_KERNEL); + if (!abm->actions) + goto err_free_thresh; + /* We start in legacy mode, make sure advanced queuing is disabled */ - err = nfp_abm_ctrl_qm_disable(abm); + err = nfp_abm_fw_init_reset(abm); if (err) - goto err_free_abm; + goto err_free_act; err = -ENOMEM; reprs = nfp_reprs_alloc(pf->max_data_vnics); if (!reprs) - goto err_free_abm; + goto err_free_act; RCU_INIT_POINTER(app->reprs[NFP_REPR_TYPE_PHYS_PORT], reprs); reprs = nfp_reprs_alloc(pf->max_data_vnics); @@ -710,6 +503,12 @@ static int nfp_abm_init(struct nfp_app *app) err_free_phys: nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); +err_free_act: + kvfree(abm->actions); +err_free_thresh: + kvfree(abm->thresholds); +err_free_thresh_umap: + bitmap_free(abm->threshold_undef); err_free_abm: kfree(abm); app->priv = NULL; @@ -723,6 +522,9 @@ static void nfp_abm_clean(struct nfp_app *app) nfp_abm_eswitch_clean_up(abm); nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF); nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT); + bitmap_free(abm->threshold_undef); + kvfree(abm->actions); + kvfree(abm->thresholds); kfree(abm); app->priv = NULL; } @@ -736,6 +538,7 @@ const struct nfp_app_type app_abm = { .vnic_alloc = nfp_abm_vnic_alloc, .vnic_free = nfp_abm_vnic_free, + .vnic_init = nfp_abm_vnic_init, .port_get_stats = nfp_abm_port_get_stats, .port_get_stats_count = nfp_abm_port_get_stats_count, diff --git 
a/drivers/net/ethernet/netronome/nfp/abm/main.h b/drivers/net/ethernet/netronome/nfp/abm/main.h index f907b7d98917..49749c60885e 100644 --- a/drivers/net/ethernet/netronome/nfp/abm/main.h +++ b/drivers/net/ethernet/netronome/nfp/abm/main.h @@ -4,7 +4,19 @@ #ifndef __NFP_ABM_H__ #define __NFP_ABM_H__ 1 +#include <linux/bits.h> +#include <linux/list.h> +#include <linux/radix-tree.h> #include <net/devlink.h> +#include <net/pkt_cls.h> +#include <net/pkt_sched.h> + +/* Dump of 64 PRIOs and 256 REDs seems to take 850us on Xeon v4 @ 2.20GHz; + * 2.5ms / 400Hz seems more than sufficient for stats resolution. + */ +#define NFP_ABM_STATS_REFRESH_IVAL (2500 * 1000) /* ns */ + +#define NFP_ABM_LVL_INFINITY S32_MAX struct nfp_app; struct nfp_net; @@ -12,21 +24,64 @@ struct nfp_net; #define NFP_ABM_PORTID_TYPE GENMASK(23, 16) #define NFP_ABM_PORTID_ID GENMASK(7, 0) +/* The possible actions if thresholds are exceeded */ +enum nfp_abm_q_action { + /* mark if ECN capable, otherwise drop */ + NFP_ABM_ACT_MARK_DROP = 0, + /* mark if ECN capable, otherwise goto QM */ + NFP_ABM_ACT_MARK_QUEUE = 1, + NFP_ABM_ACT_DROP = 2, + NFP_ABM_ACT_QUEUE = 3, + NFP_ABM_ACT_NOQUEUE = 4, +}; + /** * struct nfp_abm - ABM NIC app structure * @app: back pointer to nfp_app * @pf_id: ID of our PF link + * + * @red_support: is RED offload supported + * @num_prios: number of supported DSCP priorities + * @num_bands: number of supported DSCP priority bands + * @action_mask: bitmask of supported actions + * + * @thresholds: current threshold configuration + * @threshold_undef: bitmap of thresholds which have not been set + * @actions: current FW action configuration + * @num_thresholds: number of @thresholds and bits in @threshold_undef + * + * @prio_map_len: computed length of FW priority map (in bytes) + * @dscp_mask: mask FW will apply on DSCP field + * * @eswitch_mode: devlink eswitch mode, advanced functions only visible * in switchdev mode + * * @q_lvls: queue level control area * @qm_stats: queue statistics symbol + * @q_stats: basic queue statistics (only in per-band case) */ struct nfp_abm { struct nfp_app *app; unsigned int pf_id; + + unsigned int red_support; + unsigned int num_prios; + unsigned int num_bands; + unsigned int action_mask; + + u32 *thresholds; + unsigned long *threshold_undef; + u8 *actions; + size_t num_thresholds; + + unsigned int prio_map_len; + u8 dscp_mask; + enum devlink_eswitch_mode eswitch_mode; + const struct nfp_rtsym *q_lvls; const struct nfp_rtsym *qm_stats; + const struct nfp_rtsym *q_stats; }; /** @@ -57,16 +112,76 @@ struct nfp_alink_xstats { u64 pdrop; }; +enum nfp_qdisc_type { + NFP_QDISC_NONE = 0, + NFP_QDISC_MQ, + NFP_QDISC_RED, + NFP_QDISC_GRED, +}; + +#define NFP_QDISC_UNTRACKED ((struct nfp_qdisc *)1UL) + /** - * struct nfp_red_qdisc - representation of single RED Qdisc - * @handle: handle of currently offloaded RED Qdisc - * @stats: statistics from last refresh - * @xstats: base of extended statistics + * struct nfp_qdisc - tracked TC Qdisc + * @netdev: netdev on which Qdisc was created + * @type: Qdisc type + * @handle: handle of this Qdisc + * @parent_handle: handle of the parent (unreliable if Qdisc was grafted) + * @use_cnt: number of attachment points in the hierarchy + * @num_children: current size of the @children array + * @children: pointers to children + * + * @params_ok: parameters of this Qdisc are OK for offload + * @offload_mark: offload refresh state - selected for offload + * @offloaded: Qdisc is currently offloaded to the HW + * + * @mq: MQ Qdisc specific parameters and 
state + * @mq.stats: current stats of the MQ Qdisc + * @mq.prev_stats: previously reported @mq.stats + * + * @red: RED Qdisc specific parameters and state + * @red.num_bands: Number of valid entries in the @red.band table + * @red.band: Per-band array of RED instances + * @red.band.ecn: ECN marking is enabled (rather than drop) + * @red.band.threshold: ECN marking threshold + * @red.band.stats: current stats of the RED Qdisc + * @red.band.prev_stats: previously reported @red.stats + * @red.band.xstats: extended stats for RED - current + * @red.band.prev_xstats: extended stats for RED - previously reported */ -struct nfp_red_qdisc { +struct nfp_qdisc { + struct net_device *netdev; + enum nfp_qdisc_type type; u32 handle; - struct nfp_alink_stats stats; - struct nfp_alink_xstats xstats; + u32 parent_handle; + unsigned int use_cnt; + unsigned int num_children; + struct nfp_qdisc **children; + + bool params_ok; + bool offload_mark; + bool offloaded; + + union { + /* NFP_QDISC_MQ */ + struct { + struct nfp_alink_stats stats; + struct nfp_alink_stats prev_stats; + } mq; + /* TC_SETUP_QDISC_RED, TC_SETUP_QDISC_GRED */ + struct { + unsigned int num_bands; + + struct { + bool ecn; + u32 threshold; + struct nfp_alink_stats stats; + struct nfp_alink_stats prev_stats; + struct nfp_alink_xstats xstats; + struct nfp_alink_xstats prev_xstats; + } band[MAX_DPs]; + } red; + }; }; /** @@ -76,9 +191,17 @@ struct nfp_red_qdisc { * @id: id of the data vNIC * @queue_base: id of base to host queue within PCIe (not QC idx) * @total_queues: number of PF queues - * @parent: handle of expected parent, i.e. handle of MQ, or TC_H_ROOT - * @num_qdiscs: number of currently used qdiscs - * @qdiscs: array of qdiscs + * + * @last_stats_update: ktime of last stats update + * + * @prio_map: current map of priorities + * @has_prio: @prio_map is valid + * + * @def_band: default band to use + * @dscp_map: list of DSCP to band mappings + * + * @root_qdisc: pointer to the current root of the Qdisc hierarchy + * @qdiscs: all qdiscs recorded by major part of the handle */ struct nfp_abm_link { struct nfp_abm *abm; @@ -86,26 +209,65 @@ struct nfp_abm_link { unsigned int id; unsigned int queue_base; unsigned int total_queues; - u32 parent; - unsigned int num_qdiscs; - struct nfp_red_qdisc *qdiscs; + + u64 last_stats_update; + + u32 *prio_map; + bool has_prio; + + u8 def_band; + struct list_head dscp_map; + + struct nfp_qdisc *root_qdisc; + struct radix_tree_root qdiscs; }; -void nfp_abm_ctrl_read_params(struct nfp_abm_link *alink); +static inline bool nfp_abm_has_prio(struct nfp_abm *abm) +{ + return abm->num_bands > 1; +} + +static inline bool nfp_abm_has_drop(struct nfp_abm *abm) +{ + return abm->action_mask & BIT(NFP_ABM_ACT_DROP); +} + +static inline bool nfp_abm_has_mark(struct nfp_abm *abm) +{ + return abm->action_mask & BIT(NFP_ABM_ACT_MARK_DROP); +} + +void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink); +int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_root_qopt_offload *opt); +int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt); +int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt); +int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_gred_qopt_offload *opt); +int nfp_abm_setup_cls_block(struct net_device *netdev, struct nfp_repr *repr, + struct tc_block_offload *opt); + +int nfp_abm_ctrl_read_params(struct 
nfp_abm_link *alink); int nfp_abm_ctrl_find_addrs(struct nfp_abm *abm); -int nfp_abm_ctrl_set_all_q_lvls(struct nfp_abm_link *alink, u32 val); -int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int i, - u32 val); -int nfp_abm_ctrl_read_stats(struct nfp_abm_link *alink, - struct nfp_alink_stats *stats); -int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, unsigned int i, +int __nfp_abm_ctrl_set_q_lvl(struct nfp_abm *abm, unsigned int id, u32 val); +int nfp_abm_ctrl_set_q_lvl(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, u32 val); +int __nfp_abm_ctrl_set_q_act(struct nfp_abm *abm, unsigned int id, + enum nfp_abm_q_action act); +int nfp_abm_ctrl_set_q_act(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, enum nfp_abm_q_action act); +int nfp_abm_ctrl_read_q_stats(struct nfp_abm_link *alink, + unsigned int band, unsigned int queue, struct nfp_alink_stats *stats); -int nfp_abm_ctrl_read_xstats(struct nfp_abm_link *alink, - struct nfp_alink_xstats *xstats); -int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, unsigned int i, +int nfp_abm_ctrl_read_q_xstats(struct nfp_abm_link *alink, + unsigned int band, unsigned int queue, struct nfp_alink_xstats *xstats); u64 nfp_abm_ctrl_stat_non_sto(struct nfp_abm_link *alink, unsigned int i); u64 nfp_abm_ctrl_stat_sto(struct nfp_abm_link *alink, unsigned int i); int nfp_abm_ctrl_qm_enable(struct nfp_abm *abm); int nfp_abm_ctrl_qm_disable(struct nfp_abm *abm); +void nfp_abm_prio_map_update(struct nfp_abm *abm); +int nfp_abm_ctrl_prio_map_update(struct nfp_abm_link *alink, u32 *packed); #endif diff --git a/drivers/net/ethernet/netronome/nfp/abm/qdisc.c b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c new file mode 100644 index 000000000000..2473fb5f75e5 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/abm/qdisc.c @@ -0,0 +1,850 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2018 Netronome Systems, Inc. 
*/ + +#include <linux/rtnetlink.h> +#include <net/pkt_cls.h> +#include <net/pkt_sched.h> +#include <net/red.h> + +#include "../nfpcore/nfp_cpp.h" +#include "../nfp_app.h" +#include "../nfp_main.h" +#include "../nfp_net.h" +#include "../nfp_port.h" +#include "main.h" + +static bool nfp_abm_qdisc_is_red(struct nfp_qdisc *qdisc) +{ + return qdisc->type == NFP_QDISC_RED || qdisc->type == NFP_QDISC_GRED; +} + +static bool nfp_abm_qdisc_child_valid(struct nfp_qdisc *qdisc, unsigned int id) +{ + return qdisc->children[id] && + qdisc->children[id] != NFP_QDISC_UNTRACKED; +} + +static void *nfp_abm_qdisc_tree_deref_slot(void __rcu **slot) +{ + return rtnl_dereference(*slot); +} + +static void +nfp_abm_stats_propagate(struct nfp_alink_stats *parent, + struct nfp_alink_stats *child) +{ + parent->tx_pkts += child->tx_pkts; + parent->tx_bytes += child->tx_bytes; + parent->backlog_pkts += child->backlog_pkts; + parent->backlog_bytes += child->backlog_bytes; + parent->overlimits += child->overlimits; + parent->drops += child->drops; +} + +static void +nfp_abm_stats_update_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc, + unsigned int queue) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + unsigned int i; + int err; + + if (!qdisc->offloaded) + return; + + for (i = 0; i < qdisc->red.num_bands; i++) { + err = nfp_abm_ctrl_read_q_stats(alink, i, queue, + &qdisc->red.band[i].stats); + if (err) + nfp_err(cpp, "RED stats (%d, %d) read failed with error %d\n", + i, queue, err); + + err = nfp_abm_ctrl_read_q_xstats(alink, i, queue, + &qdisc->red.band[i].xstats); + if (err) + nfp_err(cpp, "RED xstats (%d, %d) read failed with error %d\n", + i, queue, err); + } +} + +static void +nfp_abm_stats_update_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc) +{ + unsigned int i; + + if (qdisc->type != NFP_QDISC_MQ) + return; + + for (i = 0; i < alink->total_queues; i++) + if (nfp_abm_qdisc_child_valid(qdisc, i)) + nfp_abm_stats_update_red(alink, qdisc->children[i], i); +} + +static void __nfp_abm_stats_update(struct nfp_abm_link *alink, u64 time_now) +{ + alink->last_stats_update = time_now; + if (alink->root_qdisc) + nfp_abm_stats_update_mq(alink, alink->root_qdisc); +} + +static void nfp_abm_stats_update(struct nfp_abm_link *alink) +{ + u64 now; + + /* Limit the frequency of updates - stats of non-leaf qdiscs are a sum + * of all their leaves, so we would read the same stat multiple times + * for every dump. 
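
This refresh throttle reduces to a timestamp compare: skip the expensive hardware reads when the last snapshot is younger than the interval, and serve cached counters instead. A self-contained userspace rendition of the same pattern; STATS_REFRESH_IVAL, now_ns() and stats_update() are illustrative, with clock_gettime() standing in for ktime_get():

#include <stdint.h>
#include <time.h>

#define STATS_REFRESH_IVAL	(2500 * 1000ULL)	/* 2.5 ms in ns */

static uint64_t last_update;				/* monotonic ns */

static uint64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void stats_update(void)
{
	uint64_t now = now_ns();

	if (now - last_update < STATS_REFRESH_IVAL)
		return;			/* serve the cached counters */
	last_update = now;
	/* ...walk the qdisc tree and re-read the HW counters here... */
}
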
+ */ + now = ktime_get(); + if (now - alink->last_stats_update < NFP_ABM_STATS_REFRESH_IVAL) + return; + + __nfp_abm_stats_update(alink, now); +} + +static void +nfp_abm_qdisc_unlink_children(struct nfp_qdisc *qdisc, + unsigned int start, unsigned int end) +{ + unsigned int i; + + for (i = start; i < end; i++) + if (nfp_abm_qdisc_child_valid(qdisc, i)) { + qdisc->children[i]->use_cnt--; + qdisc->children[i] = NULL; + } +} + +static void +nfp_abm_qdisc_offload_stop(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc) +{ + unsigned int i; + + /* Don't complain when qdisc is getting unlinked */ + if (qdisc->use_cnt) + nfp_warn(alink->abm->app->cpp, "Offload of '%08x' stopped\n", + qdisc->handle); + + if (!nfp_abm_qdisc_is_red(qdisc)) + return; + + for (i = 0; i < qdisc->red.num_bands; i++) { + qdisc->red.band[i].stats.backlog_pkts = 0; + qdisc->red.band[i].stats.backlog_bytes = 0; + } +} + +static int +__nfp_abm_stats_init(struct nfp_abm_link *alink, unsigned int band, + unsigned int queue, struct nfp_alink_stats *prev_stats, + struct nfp_alink_xstats *prev_xstats) +{ + u64 backlog_pkts, backlog_bytes; + int err; + + /* Don't touch the backlog, backlog can only be reset after it has + * been reported back to the tc qdisc stats. + */ + backlog_pkts = prev_stats->backlog_pkts; + backlog_bytes = prev_stats->backlog_bytes; + + err = nfp_abm_ctrl_read_q_stats(alink, band, queue, prev_stats); + if (err) { + nfp_err(alink->abm->app->cpp, + "RED stats init (%d, %d) failed with error %d\n", + band, queue, err); + return err; + } + + err = nfp_abm_ctrl_read_q_xstats(alink, band, queue, prev_xstats); + if (err) { + nfp_err(alink->abm->app->cpp, + "RED xstats init (%d, %d) failed with error %d\n", + band, queue, err); + return err; + } + + prev_stats->backlog_pkts = backlog_pkts; + prev_stats->backlog_bytes = backlog_bytes; + return 0; +} + +static int +nfp_abm_stats_init(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc, + unsigned int queue) +{ + unsigned int i; + int err; + + for (i = 0; i < qdisc->red.num_bands; i++) { + err = __nfp_abm_stats_init(alink, i, queue, + &qdisc->red.band[i].prev_stats, + &qdisc->red.band[i].prev_xstats); + if (err) + return err; + } + + return 0; +} + +static void +nfp_abm_offload_compile_red(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc, + unsigned int queue) +{ + bool good_red, good_gred; + unsigned int i; + + good_red = qdisc->type == NFP_QDISC_RED && + qdisc->params_ok && + qdisc->use_cnt == 1 && + !alink->has_prio && + !qdisc->children[0]; + good_gred = qdisc->type == NFP_QDISC_GRED && + qdisc->params_ok && + qdisc->use_cnt == 1; + qdisc->offload_mark = good_red || good_gred; + + /* If we are starting offload init prev_stats */ + if (qdisc->offload_mark && !qdisc->offloaded) + if (nfp_abm_stats_init(alink, qdisc, queue)) + qdisc->offload_mark = false; + + if (!qdisc->offload_mark) + return; + + for (i = 0; i < alink->abm->num_bands; i++) { + enum nfp_abm_q_action act; + + nfp_abm_ctrl_set_q_lvl(alink, i, queue, + qdisc->red.band[i].threshold); + act = qdisc->red.band[i].ecn ? 
+ NFP_ABM_ACT_MARK_DROP : NFP_ABM_ACT_DROP; + nfp_abm_ctrl_set_q_act(alink, i, queue, act); + } +} + +static void +nfp_abm_offload_compile_mq(struct nfp_abm_link *alink, struct nfp_qdisc *qdisc) +{ + unsigned int i; + + qdisc->offload_mark = qdisc->type == NFP_QDISC_MQ; + if (!qdisc->offload_mark) + return; + + for (i = 0; i < alink->total_queues; i++) { + struct nfp_qdisc *child = qdisc->children[i]; + + if (!nfp_abm_qdisc_child_valid(qdisc, i)) + continue; + + nfp_abm_offload_compile_red(alink, child, i); + } +} + +void nfp_abm_qdisc_offload_update(struct nfp_abm_link *alink) +{ + struct nfp_abm *abm = alink->abm; + struct radix_tree_iter iter; + struct nfp_qdisc *qdisc; + void __rcu **slot; + size_t i; + + /* Mark all thresholds as unconfigured */ + for (i = 0; i < abm->num_bands; i++) + __bitmap_set(abm->threshold_undef, + i * NFP_NET_MAX_RX_RINGS + alink->queue_base, + alink->total_queues); + + /* Clear offload marks */ + radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) { + qdisc = nfp_abm_qdisc_tree_deref_slot(slot); + qdisc->offload_mark = false; + } + + if (alink->root_qdisc) + nfp_abm_offload_compile_mq(alink, alink->root_qdisc); + + /* Refresh offload status */ + radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) { + qdisc = nfp_abm_qdisc_tree_deref_slot(slot); + if (!qdisc->offload_mark && qdisc->offloaded) + nfp_abm_qdisc_offload_stop(alink, qdisc); + qdisc->offloaded = qdisc->offload_mark; + } + + /* Reset the unconfigured thresholds */ + for (i = 0; i < abm->num_thresholds; i++) + if (test_bit(i, abm->threshold_undef)) + __nfp_abm_ctrl_set_q_lvl(abm, i, NFP_ABM_LVL_INFINITY); + + __nfp_abm_stats_update(alink, ktime_get()); +} + +static void +nfp_abm_qdisc_clear_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct nfp_qdisc *qdisc) +{ + struct radix_tree_iter iter; + unsigned int mq_refs = 0; + void __rcu **slot; + + if (!qdisc->use_cnt) + return; + /* MQ doesn't notify well on destruction; we need special handling of + * MQ's children. 
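
Stepping back to nfp_abm_qdisc_offload_update() a few lines up: it is a mark-and-sweep pass - clear every offload_mark, re-mark the qdiscs reachable from the root whose parameters the firmware can express, then reconcile the offloaded flags against the marks. A reduced sketch over a flat array; struct q and offload_update() are illustrative, not driver types:

struct q {
	int want_offload;	/* offload_mark */
	int offloaded;
};

static void offload_update(struct q *qs, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)			/* phase 1: clear marks */
		qs[i].want_offload = 0;

	/* phase 2: walk the hierarchy from the root and set want_offload
	 * on every qdisc the firmware can express (params_ok, use_cnt, ...)
	 */

	for (i = 0; i < n; i++) {		/* phase 3: reconcile */
		if (!qs[i].want_offload && qs[i].offloaded)
			/* stop offload: warn, zero the backlog counters */;
		qs[i].offloaded = qs[i].want_offload;
	}
}
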
+ */ + if (qdisc->type == NFP_QDISC_MQ && + qdisc == alink->root_qdisc && + netdev->reg_state == NETREG_UNREGISTERING) + return; + + /* Count refs held by MQ instances and clear pointers */ + radix_tree_for_each_slot(slot, &alink->qdiscs, &iter, 0) { + struct nfp_qdisc *mq = nfp_abm_qdisc_tree_deref_slot(slot); + unsigned int i; + + if (mq->type != NFP_QDISC_MQ || mq->netdev != netdev) + continue; + for (i = 0; i < mq->num_children; i++) + if (mq->children[i] == qdisc) { + mq->children[i] = NULL; + mq_refs++; + } + } + + WARN(qdisc->use_cnt != mq_refs, "non-zero qdisc use count: %d (- %d)\n", + qdisc->use_cnt, mq_refs); +} + +static void +nfp_abm_qdisc_free(struct net_device *netdev, struct nfp_abm_link *alink, + struct nfp_qdisc *qdisc) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + + if (!qdisc) + return; + nfp_abm_qdisc_clear_mq(netdev, alink, qdisc); + WARN_ON(radix_tree_delete(&alink->qdiscs, + TC_H_MAJ(qdisc->handle)) != qdisc); + + kfree(qdisc->children); + kfree(qdisc); + + port->tc_offload_cnt--; +} + +static struct nfp_qdisc * +nfp_abm_qdisc_alloc(struct net_device *netdev, struct nfp_abm_link *alink, + enum nfp_qdisc_type type, u32 parent_handle, u32 handle, + unsigned int children) +{ + struct nfp_port *port = nfp_port_from_netdev(netdev); + struct nfp_qdisc *qdisc; + int err; + + qdisc = kzalloc(sizeof(*qdisc), GFP_KERNEL); + if (!qdisc) + return NULL; + + if (children) { + qdisc->children = kcalloc(children, sizeof(void *), GFP_KERNEL); + if (!qdisc->children) + goto err_free_qdisc; + } + + qdisc->netdev = netdev; + qdisc->type = type; + qdisc->parent_handle = parent_handle; + qdisc->handle = handle; + qdisc->num_children = children; + + err = radix_tree_insert(&alink->qdiscs, TC_H_MAJ(qdisc->handle), qdisc); + if (err) { + nfp_err(alink->abm->app->cpp, + "Qdisc insertion into radix tree failed: %d\n", err); + goto err_free_child_tbl; + } + + port->tc_offload_cnt++; + return qdisc; + +err_free_child_tbl: + kfree(qdisc->children); +err_free_qdisc: + kfree(qdisc); + return NULL; +} + +static struct nfp_qdisc * +nfp_abm_qdisc_find(struct nfp_abm_link *alink, u32 handle) +{ + return radix_tree_lookup(&alink->qdiscs, TC_H_MAJ(handle)); +} + +static int +nfp_abm_qdisc_replace(struct net_device *netdev, struct nfp_abm_link *alink, + enum nfp_qdisc_type type, u32 parent_handle, u32 handle, + unsigned int children, struct nfp_qdisc **qdisc) +{ + *qdisc = nfp_abm_qdisc_find(alink, handle); + if (*qdisc) { + if (WARN_ON((*qdisc)->type != type)) + return -EINVAL; + return 1; + } + + *qdisc = nfp_abm_qdisc_alloc(netdev, alink, type, parent_handle, handle, + children); + return *qdisc ? 0 : -ENOMEM; +} + +static void +nfp_abm_qdisc_destroy(struct net_device *netdev, struct nfp_abm_link *alink, + u32 handle) +{ + struct nfp_qdisc *qdisc; + + qdisc = nfp_abm_qdisc_find(alink, handle); + if (!qdisc) + return; + + /* We don't get TC_SETUP_ROOT_QDISC w/ MQ when netdev is unregistered */ + if (alink->root_qdisc == qdisc) + qdisc->use_cnt--; + + nfp_abm_qdisc_unlink_children(qdisc, 0, qdisc->num_children); + nfp_abm_qdisc_free(netdev, alink, qdisc); + + if (alink->root_qdisc == qdisc) { + alink->root_qdisc = NULL; + /* Only root change matters, other changes are acted upon on + * the graft notification. 
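
For reference, the radix tree above is keyed by the major half of the 32-bit TC handle: the top 16 bits name the qdisc, the bottom 16 bits the class. A self-contained check, with TC_H_MAJ/TC_H_MIN mirroring the kernel's pkt_sched.h definitions:

#include <stdint.h>
#include <stdio.h>

#define TC_H_MAJ(h)	((h) & 0xffff0000U)	/* qdisc id */
#define TC_H_MIN(h)	((h) & 0x0000ffffU)	/* class id */

int main(void)
{
	uint32_t handle = 0x8001000aU;		/* "8001:a" in tc syntax */

	printf("radix key=%#x minor=%#x\n",
	       TC_H_MAJ(handle), TC_H_MIN(handle));
	return 0;
}
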
+ */ + nfp_abm_qdisc_offload_update(alink); + } +} + +static int +nfp_abm_qdisc_graft(struct nfp_abm_link *alink, u32 handle, u32 child_handle, + unsigned int id) +{ + struct nfp_qdisc *parent, *child; + + parent = nfp_abm_qdisc_find(alink, handle); + if (!parent) + return 0; + + if (WARN(id >= parent->num_children, + "graft child out of bound %d >= %d\n", + id, parent->num_children)) + return -EINVAL; + + nfp_abm_qdisc_unlink_children(parent, id, id + 1); + + child = nfp_abm_qdisc_find(alink, child_handle); + if (child) + child->use_cnt++; + else + child = NFP_QDISC_UNTRACKED; + parent->children[id] = child; + + nfp_abm_qdisc_offload_update(alink); + + return 0; +} + +static void +nfp_abm_stats_calculate(struct nfp_alink_stats *new, + struct nfp_alink_stats *old, + struct gnet_stats_basic_packed *bstats, + struct gnet_stats_queue *qstats) +{ + _bstats_update(bstats, new->tx_bytes - old->tx_bytes, + new->tx_pkts - old->tx_pkts); + qstats->qlen += new->backlog_pkts - old->backlog_pkts; + qstats->backlog += new->backlog_bytes - old->backlog_bytes; + qstats->overlimits += new->overlimits - old->overlimits; + qstats->drops += new->drops - old->drops; +} + +static void +nfp_abm_stats_red_calculate(struct nfp_alink_xstats *new, + struct nfp_alink_xstats *old, + struct red_stats *stats) +{ + stats->forced_mark += new->ecn_marked - old->ecn_marked; + stats->pdrop += new->pdrop - old->pdrop; +} + +static int +nfp_abm_gred_stats(struct nfp_abm_link *alink, u32 handle, + struct tc_gred_qopt_offload_stats *stats) +{ + struct nfp_qdisc *qdisc; + unsigned int i; + + nfp_abm_stats_update(alink); + + qdisc = nfp_abm_qdisc_find(alink, handle); + if (!qdisc) + return -EOPNOTSUPP; + /* If the qdisc offload has stopped we may need to adjust the backlog + * counters back so carry on even if qdisc is not currently offloaded. + */ + + for (i = 0; i < qdisc->red.num_bands; i++) { + if (!stats->xstats[i]) + continue; + + nfp_abm_stats_calculate(&qdisc->red.band[i].stats, + &qdisc->red.band[i].prev_stats, + &stats->bstats[i], &stats->qstats[i]); + qdisc->red.band[i].prev_stats = qdisc->red.band[i].stats; + + nfp_abm_stats_red_calculate(&qdisc->red.band[i].xstats, + &qdisc->red.band[i].prev_xstats, + stats->xstats[i]); + qdisc->red.band[i].prev_xstats = qdisc->red.band[i].xstats; + } + + return qdisc->offloaded ? 
0 : -EOPNOTSUPP; +} + +static bool +nfp_abm_gred_check_params(struct nfp_abm_link *alink, + struct tc_gred_qopt_offload *opt) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + struct nfp_abm *abm = alink->abm; + unsigned int i; + + if (opt->set.grio_on || opt->set.wred_on) { + nfp_warn(cpp, "GRED offload failed - GRIO and WRED not supported (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.dp_def != alink->def_band) { + nfp_warn(cpp, "GRED offload failed - default band must be %d (p:%08x h:%08x)\n", + alink->def_band, opt->parent, opt->handle); + return false; + } + if (opt->set.dp_cnt != abm->num_bands) { + nfp_warn(cpp, "GRED offload failed - band count must be %d (p:%08x h:%08x)\n", + abm->num_bands, opt->parent, opt->handle); + return false; + } + + for (i = 0; i < abm->num_bands; i++) { + struct tc_gred_vq_qopt_offload_params *band = &opt->set.tab[i]; + + if (!band->present) + return false; + if (!band->is_ecn && !nfp_abm_has_drop(abm)) { + nfp_warn(cpp, "GRED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x vq:%d)\n", + opt->parent, opt->handle, i); + return false; + } + if (band->is_ecn && !nfp_abm_has_mark(abm)) { + nfp_warn(cpp, "GRED offload failed - ECN marking not supported (p:%08x h:%08x vq:%d)\n", + opt->parent, opt->handle, i); + return false; + } + if (band->is_harddrop) { + nfp_warn(cpp, "GRED offload failed - harddrop is not supported (p:%08x h:%08x vq:%d)\n", + opt->parent, opt->handle, i); + return false; + } + if (band->min != band->max) { + nfp_warn(cpp, "GRED offload failed - threshold mismatch (p:%08x h:%08x vq:%d)\n", + opt->parent, opt->handle, i); + return false; + } + if (band->min > S32_MAX) { + nfp_warn(cpp, "GRED offload failed - threshold too large %d > %d (p:%08x h:%08x vq:%d)\n", + band->min, S32_MAX, opt->parent, opt->handle, + i); + return false; + } + } + + return true; +} + +static int +nfp_abm_gred_replace(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_gred_qopt_offload *opt) +{ + struct nfp_qdisc *qdisc; + unsigned int i; + int ret; + + ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_GRED, opt->parent, + opt->handle, 0, &qdisc); + if (ret < 0) + return ret; + + qdisc->params_ok = nfp_abm_gred_check_params(alink, opt); + if (qdisc->params_ok) { + qdisc->red.num_bands = opt->set.dp_cnt; + for (i = 0; i < qdisc->red.num_bands; i++) { + qdisc->red.band[i].ecn = opt->set.tab[i].is_ecn; + qdisc->red.band[i].threshold = opt->set.tab[i].min; + } + } + + if (qdisc->use_cnt) + nfp_abm_qdisc_offload_update(alink); + + return 0; +} + +int nfp_abm_setup_tc_gred(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_gred_qopt_offload *opt) +{ + switch (opt->command) { + case TC_GRED_REPLACE: + return nfp_abm_gred_replace(netdev, alink, opt); + case TC_GRED_DESTROY: + nfp_abm_qdisc_destroy(netdev, alink, opt->handle); + return 0; + case TC_GRED_STATS: + return nfp_abm_gred_stats(alink, opt->handle, &opt->stats); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_red_xstats(struct nfp_abm_link *alink, struct tc_red_qopt_offload *opt) +{ + struct nfp_qdisc *qdisc; + + nfp_abm_stats_update(alink); + + qdisc = nfp_abm_qdisc_find(alink, opt->handle); + if (!qdisc || !qdisc->offloaded) + return -EOPNOTSUPP; + + nfp_abm_stats_red_calculate(&qdisc->red.band[0].xstats, + &qdisc->red.band[0].prev_xstats, + opt->xstats); + qdisc->red.band[0].prev_xstats = qdisc->red.band[0].xstats; + return 0; +} + +static int +nfp_abm_red_stats(struct nfp_abm_link *alink, u32 
handle, + struct tc_qopt_offload_stats *stats) +{ + struct nfp_qdisc *qdisc; + + nfp_abm_stats_update(alink); + + qdisc = nfp_abm_qdisc_find(alink, handle); + if (!qdisc) + return -EOPNOTSUPP; + /* If the qdisc offload has stopped we may need to adjust the backlog + * counters back so carry on even if qdisc is not currently offloaded. + */ + + nfp_abm_stats_calculate(&qdisc->red.band[0].stats, + &qdisc->red.band[0].prev_stats, + stats->bstats, stats->qstats); + qdisc->red.band[0].prev_stats = qdisc->red.band[0].stats; + + return qdisc->offloaded ? 0 : -EOPNOTSUPP; +} + +static bool +nfp_abm_red_check_params(struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + struct nfp_cpp *cpp = alink->abm->app->cpp; + struct nfp_abm *abm = alink->abm; + + if (!opt->set.is_ecn && !nfp_abm_has_drop(abm)) { + nfp_warn(cpp, "RED offload failed - drop is not supported (ECN option required) (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.is_ecn && !nfp_abm_has_mark(abm)) { + nfp_warn(cpp, "RED offload failed - ECN marking not supported (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.is_harddrop) { + nfp_warn(cpp, "RED offload failed - harddrop is not supported (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.min != opt->set.max) { + nfp_warn(cpp, "RED offload failed - unsupported min/max parameters (p:%08x h:%08x)\n", + opt->parent, opt->handle); + return false; + } + if (opt->set.min > NFP_ABM_LVL_INFINITY) { + nfp_warn(cpp, "RED offload failed - threshold too large %d > %d (p:%08x h:%08x)\n", + opt->set.min, NFP_ABM_LVL_INFINITY, opt->parent, + opt->handle); + return false; + } + + return true; +} + +static int +nfp_abm_red_replace(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + struct nfp_qdisc *qdisc; + int ret; + + ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_RED, opt->parent, + opt->handle, 1, &qdisc); + if (ret < 0) + return ret; + + /* If limit != 0, the child gets reset */ + if (opt->set.limit) { + if (nfp_abm_qdisc_child_valid(qdisc, 0)) + qdisc->children[0]->use_cnt--; + qdisc->children[0] = NULL; + } else { + /* A Qdisc that was just allocated without a limit will use noop_qdisc, + * i.e. a black hole. 
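
The child bookkeeping here condenses to the sketch below; fresh stands for the just-allocated case (the ret == 0 branch above), and the types are illustrative rather than the driver's:

struct qdisc {
	struct qdisc *child;			/* single RED child slot */
	unsigned int use_cnt;
};

#define QDISC_UNTRACKED ((struct qdisc *)1UL)	/* seen, but not ours */

static void red_update_child(struct qdisc *red, int limit_set, int fresh)
{
	if (limit_set) {
		/* limit != 0: the core resets the child, drop our reference */
		if (red->child && red->child != QDISC_UNTRACKED)
			red->child->use_cnt--;
		red->child = NULL;
	} else if (fresh) {
		/* no limit on a fresh qdisc: noop_qdisc, a black hole */
		red->child = QDISC_UNTRACKED;
	}
}

Tracking noop_qdisc with a sentinel rather than NULL lets the driver tell "no child" apart from "a child it never saw".
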
+ */ + if (!ret) + qdisc->children[0] = NFP_QDISC_UNTRACKED; + } + + qdisc->params_ok = nfp_abm_red_check_params(alink, opt); + if (qdisc->params_ok) { + qdisc->red.num_bands = 1; + qdisc->red.band[0].ecn = opt->set.is_ecn; + qdisc->red.band[0].threshold = opt->set.min; + } + + if (qdisc->use_cnt == 1) + nfp_abm_qdisc_offload_update(alink); + + return 0; +} + +int nfp_abm_setup_tc_red(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_red_qopt_offload *opt) +{ + switch (opt->command) { + case TC_RED_REPLACE: + return nfp_abm_red_replace(netdev, alink, opt); + case TC_RED_DESTROY: + nfp_abm_qdisc_destroy(netdev, alink, opt->handle); + return 0; + case TC_RED_STATS: + return nfp_abm_red_stats(alink, opt->handle, &opt->stats); + case TC_RED_XSTATS: + return nfp_abm_red_xstats(alink, opt); + case TC_RED_GRAFT: + return nfp_abm_qdisc_graft(alink, opt->handle, + opt->child_handle, 0); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_abm_mq_create(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt) +{ + struct nfp_qdisc *qdisc; + int ret; + + ret = nfp_abm_qdisc_replace(netdev, alink, NFP_QDISC_MQ, + TC_H_ROOT, opt->handle, alink->total_queues, + &qdisc); + if (ret < 0) + return ret; + + qdisc->params_ok = true; + qdisc->offloaded = true; + nfp_abm_qdisc_offload_update(alink); + return 0; +} + +static int +nfp_abm_mq_stats(struct nfp_abm_link *alink, u32 handle, + struct tc_qopt_offload_stats *stats) +{ + struct nfp_qdisc *qdisc, *red; + unsigned int i, j; + + qdisc = nfp_abm_qdisc_find(alink, handle); + if (!qdisc) + return -EOPNOTSUPP; + + nfp_abm_stats_update(alink); + + /* MQ stats are summed over the children in the core, so we need + * to add up the unreported child values. + */ + memset(&qdisc->mq.stats, 0, sizeof(qdisc->mq.stats)); + memset(&qdisc->mq.prev_stats, 0, sizeof(qdisc->mq.prev_stats)); + + for (i = 0; i < qdisc->num_children; i++) { + if (!nfp_abm_qdisc_child_valid(qdisc, i)) + continue; + + if (!nfp_abm_qdisc_is_red(qdisc->children[i])) + continue; + red = qdisc->children[i]; + + for (j = 0; j < red->red.num_bands; j++) { + nfp_abm_stats_propagate(&qdisc->mq.stats, + &red->red.band[j].stats); + nfp_abm_stats_propagate(&qdisc->mq.prev_stats, + &red->red.band[j].prev_stats); + } + } + + nfp_abm_stats_calculate(&qdisc->mq.stats, &qdisc->mq.prev_stats, + stats->bstats, stats->qstats); + + return qdisc->offloaded ? 
0 : -EOPNOTSUPP; +} + +int nfp_abm_setup_tc_mq(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_mq_qopt_offload *opt) +{ + switch (opt->command) { + case TC_MQ_CREATE: + return nfp_abm_mq_create(netdev, alink, opt); + case TC_MQ_DESTROY: + nfp_abm_qdisc_destroy(netdev, alink, opt->handle); + return 0; + case TC_MQ_STATS: + return nfp_abm_mq_stats(alink, opt->handle, &opt->stats); + case TC_MQ_GRAFT: + return nfp_abm_qdisc_graft(alink, opt->handle, + opt->graft_params.child_handle, + opt->graft_params.queue); + default: + return -EOPNOTSUPP; + } +} + +int nfp_abm_setup_root(struct net_device *netdev, struct nfp_abm_link *alink, + struct tc_root_qopt_offload *opt) +{ + if (opt->ingress) + return -EOPNOTSUPP; + if (alink->root_qdisc) + alink->root_qdisc->use_cnt--; + alink->root_qdisc = nfp_abm_qdisc_find(alink, opt->handle); + if (alink->root_qdisc) + alink->root_qdisc->use_cnt++; + + nfp_abm_qdisc_offload_update(alink); + + return 0; +} diff --git a/drivers/net/ethernet/netronome/nfp/bpf/jit.c b/drivers/net/ethernet/netronome/nfp/bpf/jit.c index 97d33bb4d84d..e23ca90289f7 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/jit.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/jit.c @@ -2382,6 +2382,49 @@ static int neg_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) return 0; } +static int __ashr_imm(struct nfp_prog *nfp_prog, u8 dst, u8 shift_amt) +{ + /* Set signedness bit (MSB of result). */ + emit_alu(nfp_prog, reg_none(), reg_a(dst), ALU_OP_OR, reg_imm(0)); + emit_shf(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, reg_b(dst), + SHF_SC_R_SHF, shift_amt); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + + return 0; +} + +static int ashr_reg(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u64 umin, umax; + u8 dst, src; + + dst = insn->dst_reg * 2; + umin = meta->umin_src; + umax = meta->umax_src; + if (umin == umax) + return __ashr_imm(nfp_prog, dst, umin); + + src = insn->src_reg * 2; + /* NOTE: the first insn will set both indirect shift amount (source A) + * and signedness bit (MSB of result). + */ + emit_alu(nfp_prog, reg_none(), reg_a(src), ALU_OP_OR, reg_b(dst)); + emit_shf_indir(nfp_prog, reg_both(dst), reg_none(), SHF_OP_ASHR, + reg_b(dst), SHF_SC_R_SHF); + wrp_immed(nfp_prog, reg_both(dst + 1), 0); + + return 0; +} + +static int ashr_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) +{ + const struct bpf_insn *insn = &meta->insn; + u8 dst = insn->dst_reg * 2; + + return __ashr_imm(nfp_prog, dst, insn->imm); +} + static int shl_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; @@ -3009,26 +3052,19 @@ static int jset_imm(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta) { const struct bpf_insn *insn = &meta->insn; u64 imm = insn->imm; /* sign extend */ + u8 dst_gpr = insn->dst_reg * 2; swreg tmp_reg; - if (!imm) { - meta->skip = true; - return 0; - } - - if (imm & ~0U) { - tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); - emit_alu(nfp_prog, reg_none(), - reg_a(insn->dst_reg * 2), ALU_OP_AND, tmp_reg); - emit_br(nfp_prog, BR_BNE, insn->off, 0); - } - - if (imm >> 32) { - tmp_reg = ur_load_imm_any(nfp_prog, imm >> 32, imm_b(nfp_prog)); + tmp_reg = ur_load_imm_any(nfp_prog, imm & ~0U, imm_b(nfp_prog)); + emit_alu(nfp_prog, imm_b(nfp_prog), + reg_a(dst_gpr), ALU_OP_AND, tmp_reg); + /* Upper word of the mask can only be 0 or ~0 from sign extension, + * so either ignore it or OR the whole thing in. 
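
The claim that the upper word of the sign-extended immediate can only be 0 or ~0 is plain two's-complement arithmetic; a runnable check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int32_t imms[] = { 5, -5 };

	for (int i = 0; i < 2; i++) {
		int64_t imm = imms[i];		/* sign extend, as BPF does */
		uint32_t hi = (uint64_t)imm >> 32;

		printf("imm=%d upper=0x%08x\n", imms[i], hi);
	}
	return 0;	/* prints 0x00000000 and 0xffffffff */
}
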
+ */ + if (imm >> 32) emit_alu(nfp_prog, reg_none(), - reg_a(insn->dst_reg * 2 + 1), ALU_OP_AND, tmp_reg); - emit_br(nfp_prog, BR_BNE, insn->off, 0); - } + reg_a(dst_gpr + 1), ALU_OP_OR, imm_b(nfp_prog)); + emit_br(nfp_prog, BR_BNE, insn->off, 0); return 0; } @@ -3286,6 +3322,8 @@ static const instr_cb_t instr_cb[256] = { [BPF_ALU | BPF_DIV | BPF_K] = div_imm, [BPF_ALU | BPF_NEG] = neg_reg, [BPF_ALU | BPF_LSH | BPF_K] = shl_imm, + [BPF_ALU | BPF_ARSH | BPF_X] = ashr_reg, + [BPF_ALU | BPF_ARSH | BPF_K] = ashr_imm, [BPF_ALU | BPF_END | BPF_X] = end_reg32, [BPF_LD | BPF_IMM | BPF_DW] = imm_ld8, [BPF_LD | BPF_ABS | BPF_B] = data_ld1, diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c index 6243af0ab025..dccae0319204 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c @@ -465,7 +465,7 @@ static int nfp_bpf_init(struct nfp_app *app) app->ctrl_mtu = nfp_bpf_ctrl_cmsg_mtu(bpf); } - bpf->bpf_dev = bpf_offload_dev_create(); + bpf->bpf_dev = bpf_offload_dev_create(&nfp_bpf_dev_ops); err = PTR_ERR_OR_ZERO(bpf->bpf_dev); if (err) goto err_free_neutral_maps; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.h b/drivers/net/ethernet/netronome/nfp/bpf/main.h index 7f591d71ab28..941277936475 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/main.h +++ b/drivers/net/ethernet/netronome/nfp/bpf/main.h @@ -509,7 +509,11 @@ void nfp_bpf_jit_prepare(struct nfp_prog *nfp_prog, unsigned int cnt); int nfp_bpf_jit(struct nfp_prog *prog); bool nfp_bpf_supported_opcode(u8 code); -extern const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops; +int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, + int prev_insn_idx); +int nfp_bpf_finalize(struct bpf_verifier_env *env); + +extern const struct bpf_prog_offload_ops nfp_bpf_dev_ops; struct netdev_bpf; struct nfp_app; diff --git a/drivers/net/ethernet/netronome/nfp/bpf/offload.c b/drivers/net/ethernet/netronome/nfp/bpf/offload.c index ba8ceedcf6a2..f0283854fade 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/offload.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/offload.c @@ -33,9 +33,6 @@ nfp_map_ptr_record(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog, struct nfp_bpf_neutral_map *record; int err; - /* Map record paths are entered via ndo, update side is protected. */ - ASSERT_RTNL(); - /* Reuse path - other offloaded program is already tracking this map. 
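
The reuse path in nfp_map_ptr_record() boils down to a refcounted lookup keyed by map ID: several offloaded programs may reference one map, so a shared record carries a use count. A reduced sketch with a linear table in place of the rhashtable; map_record_get() and the types are illustrative:

#include <stddef.h>

struct map_record {
	unsigned int map_id;
	unsigned int count;	/* programs referencing this map */
};

static struct map_record *
map_record_get(struct map_record *tbl, unsigned int n, unsigned int id)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].map_id == id) {
			tbl[i].count++;		/* reuse the existing record */
			return &tbl[i];
		}
	return NULL;	/* caller allocates a fresh record with count = 1 */
}
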
*/ record = rhashtable_lookup_fast(&bpf->maps_neutral, &map->id, nfp_bpf_maps_neutral_params); @@ -84,8 +81,6 @@ nfp_map_ptrs_forget(struct nfp_app_bpf *bpf, struct nfp_prog *nfp_prog) bool freed = false; int i; - ASSERT_RTNL(); - for (i = 0; i < nfp_prog->map_records_cnt; i++) { if (--nfp_prog->map_records[i]->count) { nfp_prog->map_records[i] = NULL; @@ -187,11 +182,10 @@ static void nfp_prog_free(struct nfp_prog *nfp_prog) kfree(nfp_prog); } -static int -nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, - struct netdev_bpf *bpf) +static int nfp_bpf_verifier_prep(struct bpf_prog *prog) { - struct bpf_prog *prog = bpf->verifier.prog; + struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev); + struct nfp_app *app = nn->app; struct nfp_prog *nfp_prog; int ret; @@ -209,7 +203,6 @@ nfp_bpf_verifier_prep(struct nfp_app *app, struct nfp_net *nn, goto err_free; nfp_prog->verifier_meta = nfp_prog_first_meta(nfp_prog); - bpf->verifier.ops = &nfp_bpf_analyzer_ops; return 0; @@ -219,8 +212,9 @@ err_free: return ret; } -static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) +static int nfp_bpf_translate(struct bpf_prog *prog) { + struct nfp_net *nn = netdev_priv(prog->aux->offload->netdev); struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; unsigned int max_instr; int err; @@ -242,15 +236,13 @@ static int nfp_bpf_translate(struct nfp_net *nn, struct bpf_prog *prog) return nfp_map_ptrs_record(nfp_prog->bpf, nfp_prog, prog); } -static int nfp_bpf_destroy(struct nfp_net *nn, struct bpf_prog *prog) +static void nfp_bpf_destroy(struct bpf_prog *prog) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; kvfree(nfp_prog->prog); nfp_map_ptrs_forget(nfp_prog->bpf, nfp_prog); nfp_prog_free(nfp_prog); - - return 0; } /* Atomic engine requires values to be in big endian, we need to byte swap @@ -422,12 +414,6 @@ nfp_bpf_map_free(struct nfp_app_bpf *bpf, struct bpf_offloaded_map *offmap) int nfp_ndo_bpf(struct nfp_app *app, struct nfp_net *nn, struct netdev_bpf *bpf) { switch (bpf->command) { - case BPF_OFFLOAD_VERIFIER_PREP: - return nfp_bpf_verifier_prep(app, nn, bpf); - case BPF_OFFLOAD_TRANSLATE: - return nfp_bpf_translate(nn, bpf->offload.prog); - case BPF_OFFLOAD_DESTROY: - return nfp_bpf_destroy(nn, bpf->offload.prog); case BPF_OFFLOAD_MAP_ALLOC: return nfp_bpf_map_alloc(app->priv, bpf->offmap); case BPF_OFFLOAD_MAP_FREE: @@ -489,14 +475,15 @@ nfp_net_bpf_load(struct nfp_net *nn, struct bpf_prog *prog, struct netlink_ext_ack *extack) { struct nfp_prog *nfp_prog = prog->aux->offload->dev_priv; - unsigned int max_mtu, max_stack, max_prog_len; + unsigned int fw_mtu, pkt_off, max_stack, max_prog_len; dma_addr_t dma_addr; void *img; int err; - max_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; - if (max_mtu < nn->dp.netdev->mtu) { - NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with MTU larger than HW packet split boundary"); + fw_mtu = nn_readb(nn, NFP_NET_CFG_BPF_INL_MTU) * 64 - 32; + pkt_off = min(prog->aux->max_pkt_offset, nn->dp.netdev->mtu); + if (fw_mtu < pkt_off) { + NL_SET_ERR_MSG_MOD(extack, "BPF offload not supported with potential packet access beyond HW packet split boundary"); return -EOPNOTSUPP; } @@ -600,3 +587,11 @@ int nfp_net_bpf_offload(struct nfp_net *nn, struct bpf_prog *prog, return 0; } + +const struct bpf_prog_offload_ops nfp_bpf_dev_ops = { + .insn_hook = nfp_verify_insn, + .finalize = nfp_bpf_finalize, + .prepare = nfp_bpf_verifier_prep, + .translate = nfp_bpf_translate, + .destroy = nfp_bpf_destroy, +}; diff --git 
a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c index 99f977bfd8cc..337bb862ec1d 100644 --- a/drivers/net/ethernet/netronome/nfp/bpf/verifier.c +++ b/drivers/net/ethernet/netronome/nfp/bpf/verifier.c @@ -623,8 +623,8 @@ nfp_bpf_check_alu(struct nfp_prog *nfp_prog, struct nfp_insn_meta *meta, return 0; } -static int -nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn_idx) +int nfp_verify_insn(struct bpf_verifier_env *env, int insn_idx, + int prev_insn_idx) { struct nfp_prog *nfp_prog = env->prog->aux->offload->dev_priv; struct nfp_insn_meta *meta = nfp_prog->verifier_meta; @@ -745,7 +745,7 @@ continue_subprog: goto continue_subprog; } -static int nfp_bpf_finalize(struct bpf_verifier_env *env) +int nfp_bpf_finalize(struct bpf_verifier_env *env) { struct bpf_subprog_info *info; struct nfp_prog *nfp_prog; @@ -788,8 +788,3 @@ static int nfp_bpf_finalize(struct bpf_verifier_env *env) return 0; } - -const struct bpf_prog_offload_ops nfp_bpf_analyzer_ops = { - .insn_hook = nfp_verify_insn, - .finalize = nfp_bpf_finalize, -}; diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c index 244dc261006e..8d54b36afee8 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/action.c +++ b/drivers/net/ethernet/netronome/nfp/flower/action.c @@ -2,7 +2,6 @@ /* Copyright (C) 2017-2018 Netronome Systems, Inc. */ #include <linux/bitfield.h> -#include <net/geneve.h> #include <net/pkt_cls.h> #include <net/switchdev.h> #include <net/tc_act/tc_csum.h> @@ -91,21 +90,6 @@ nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action, return act_size; } -static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev, - enum nfp_flower_tun_type tun_type) -{ - if (!out_dev->rtnl_link_ops) - return false; - - if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan")) - return tun_type == NFP_FL_TUNNEL_VXLAN; - - if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve")) - return tun_type == NFP_FL_TUNNEL_GENEVE; - - return false; -} - static int nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, const struct tc_action *action, struct nfp_fl_payload *nfp_flow, @@ -151,11 +135,12 @@ nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output, /* Set action output parameters. */ output->flags = cpu_to_be16(tmp_flags); - /* Only offload if egress ports are on the same device as the - * ingress port. - */ - if (!switchdev_port_same_parent_id(in_dev, out_dev)) - return -EOPNOTSUPP; + if (nfp_netdev_is_nfp_repr(in_dev)) { + /* Confirm ingress and egress are on same device. 
*/ + if (!switchdev_port_same_parent_id(in_dev, out_dev)) + return -EOPNOTSUPP; + } + if (!nfp_netdev_is_nfp_repr(out_dev)) return -EOPNOTSUPP; @@ -384,10 +369,21 @@ nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off, return 0; } +struct ipv4_ttl_word { + __u8 ttl; + __u8 protocol; + __sum16 check; +}; + static int nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, - struct nfp_fl_set_ip4_addrs *set_ip_addr) + struct nfp_fl_set_ip4_addrs *set_ip_addr, + struct nfp_fl_set_ip4_ttl_tos *set_ip_ttl_tos) { + struct ipv4_ttl_word *ttl_word_mask; + struct ipv4_ttl_word *ttl_word; + struct iphdr *tos_word_mask; + struct iphdr *tos_word; __be32 exact, mask; /* We are expecting tcf_pedit to return a big endian value */ @@ -402,20 +398,53 @@ nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off, set_ip_addr->ipv4_dst_mask |= mask; set_ip_addr->ipv4_dst &= ~mask; set_ip_addr->ipv4_dst |= exact & mask; + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> + NFP_FL_LW_SIZ; break; case offsetof(struct iphdr, saddr): set_ip_addr->ipv4_src_mask |= mask; set_ip_addr->ipv4_src &= ~mask; set_ip_addr->ipv4_src |= exact & mask; + set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; + set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> + NFP_FL_LW_SIZ; + break; + case offsetof(struct iphdr, ttl): + ttl_word_mask = (struct ipv4_ttl_word *)&mask; + ttl_word = (struct ipv4_ttl_word *)&exact; + + if (ttl_word_mask->protocol || ttl_word_mask->check) + return -EOPNOTSUPP; + + set_ip_ttl_tos->ipv4_ttl_mask |= ttl_word_mask->ttl; + set_ip_ttl_tos->ipv4_ttl &= ~ttl_word_mask->ttl; + set_ip_ttl_tos->ipv4_ttl |= ttl_word->ttl & ttl_word_mask->ttl; + set_ip_ttl_tos->head.jump_id = + NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS; + set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> + NFP_FL_LW_SIZ; + break; + case round_down(offsetof(struct iphdr, tos), 4): + tos_word_mask = (struct iphdr *)&mask; + tos_word = (struct iphdr *)&exact; + + if (tos_word_mask->version || tos_word_mask->ihl || + tos_word_mask->tot_len) + return -EOPNOTSUPP; + + set_ip_ttl_tos->ipv4_tos_mask |= tos_word_mask->tos; + set_ip_ttl_tos->ipv4_tos &= ~tos_word_mask->tos; + set_ip_ttl_tos->ipv4_tos |= tos_word->tos & tos_word_mask->tos; + set_ip_ttl_tos->head.jump_id = + NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS; + set_ip_ttl_tos->head.len_lw = sizeof(*set_ip_ttl_tos) >> + NFP_FL_LW_SIZ; break; default: return -EOPNOTSUPP; } - set_ip_addr->reserved = cpu_to_be16(0); - set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS; - set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ; - return 0; } @@ -432,12 +461,57 @@ nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask, ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ; } +struct ipv6_hop_limit_word { + __be16 payload_len; + u8 nexthdr; + u8 hop_limit; +}; + +static int +nfp_fl_set_ip6_hop_limit_flow_label(u32 off, __be32 exact, __be32 mask, + struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl) +{ + struct ipv6_hop_limit_word *fl_hl_mask; + struct ipv6_hop_limit_word *fl_hl; + + switch (off) { + case offsetof(struct ipv6hdr, payload_len): + fl_hl_mask = (struct ipv6_hop_limit_word *)&mask; + fl_hl = (struct ipv6_hop_limit_word *)&exact; + + if (fl_hl_mask->nexthdr || fl_hl_mask->payload_len) + return -EOPNOTSUPP; + + ip_hl_fl->ipv6_hop_limit_mask |= fl_hl_mask->hop_limit; + ip_hl_fl->ipv6_hop_limit &= ~fl_hl_mask->hop_limit; + ip_hl_fl->ipv6_hop_limit |= fl_hl->hop_limit & 
+ fl_hl_mask->hop_limit; + break; + case round_down(offsetof(struct ipv6hdr, flow_lbl), 4): + if (mask & ~IPV6_FLOW_LABEL_MASK || + exact & ~IPV6_FLOW_LABEL_MASK) + return -EOPNOTSUPP; + + ip_hl_fl->ipv6_label_mask |= mask; + ip_hl_fl->ipv6_label &= ~mask; + ip_hl_fl->ipv6_label |= exact & mask; + break; + } + + ip_hl_fl->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL; + ip_hl_fl->head.len_lw = sizeof(*ip_hl_fl) >> NFP_FL_LW_SIZ; + + return 0; +} + static int nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, struct nfp_fl_set_ipv6_addr *ip_dst, - struct nfp_fl_set_ipv6_addr *ip_src) + struct nfp_fl_set_ipv6_addr *ip_src, + struct nfp_fl_set_ipv6_tc_hl_fl *ip_hl_fl) { __be32 exact, mask; + int err = 0; u8 word; /* We are expecting tcf_pedit to return a big endian value */ @@ -448,7 +522,8 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, return -EOPNOTSUPP; if (off < offsetof(struct ipv6hdr, saddr)) { - return -EOPNOTSUPP; + err = nfp_fl_set_ip6_hop_limit_flow_label(off, exact, mask, + ip_hl_fl); } else if (off < offsetof(struct ipv6hdr, daddr)) { word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact); nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word, @@ -462,7 +537,7 @@ nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off, return -EOPNOTSUPP; } - return 0; + return err; } static int @@ -513,6 +588,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, char *nfp_action, int *a_len, u32 *csum_updated) { struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src; + struct nfp_fl_set_ipv6_tc_hl_fl set_ip6_tc_hl_fl; + struct nfp_fl_set_ip4_ttl_tos set_ip_ttl_tos; struct nfp_fl_set_ip4_addrs set_ip_addr; struct nfp_fl_set_tport set_tport; struct nfp_fl_set_eth set_eth; @@ -522,6 +599,8 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, u32 offset, cmd; u8 ip_proto = 0; + memset(&set_ip6_tc_hl_fl, 0, sizeof(set_ip6_tc_hl_fl)); + memset(&set_ip_ttl_tos, 0, sizeof(set_ip_ttl_tos)); memset(&set_ip6_dst, 0, sizeof(set_ip6_dst)); memset(&set_ip6_src, 0, sizeof(set_ip6_src)); memset(&set_ip_addr, 0, sizeof(set_ip_addr)); @@ -542,11 +621,12 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, err = nfp_fl_set_eth(action, idx, offset, &set_eth); break; case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: - err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr); + err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr, + &set_ip_ttl_tos); break; case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst, - &set_ip6_src); + &set_ip6_src, &set_ip6_tc_hl_fl); break; case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: err = nfp_fl_set_tport(action, idx, offset, &set_tport, @@ -577,6 +657,16 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, memcpy(nfp_action, &set_eth, act_size); *a_len += act_size; } + if (set_ip_ttl_tos.head.len_lw) { + nfp_action += act_size; + act_size = sizeof(set_ip_ttl_tos); + memcpy(nfp_action, &set_ip_ttl_tos, act_size); + *a_len += act_size; + + /* Hardware will automatically fix IPv4 and TCP/UDP checksum. 
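
The ipv4_ttl_word overlay above works because tcf_pedit hands the driver aligned 32-bit exact/mask words; laying a struct over the word isolates the TTL byte and exposes whether the mask also touches protocol or checksum, in which case the offload is refused. A host-runnable sketch of the same trick (the kernel code casts the word directly where this uses memcpy()):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* mirrors the 32-bit pedit word at offsetof(struct iphdr, ttl) */
struct ipv4_ttl_word {
	uint8_t  ttl;
	uint8_t  protocol;
	uint16_t check;
};

int main(void)
{
	uint32_t mask = 0;
	struct ipv4_ttl_word w;

	((uint8_t *)&mask)[0] = 0xff;	/* pedit mask covering TTL only */
	memcpy(&w, &mask, sizeof(w));

	/* offloadable only if protocol and checksum stay untouched */
	printf("ttl-only rewrite offloadable: %s\n",
	       !w.protocol && !w.check ? "yes" : "no");
	return 0;
}
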
*/ + *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | + nfp_fl_csum_l4_to_flag(ip_proto); + } if (set_ip_addr.head.len_lw) { nfp_action += act_size; act_size = sizeof(set_ip_addr); @@ -587,6 +677,15 @@ nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow, *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR | nfp_fl_csum_l4_to_flag(ip_proto); } + if (set_ip6_tc_hl_fl.head.len_lw) { + nfp_action += act_size; + act_size = sizeof(set_ip6_tc_hl_fl); + memcpy(nfp_action, &set_ip6_tc_hl_fl, act_size); + *a_len += act_size; + + /* Hardware will automatically fix TCP/UDP checksum. */ + *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto); + } if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) { /* TC compiles set src and dst IPv6 address as a single action, * the hardware requires this to be 2 separate actions. @@ -728,9 +827,8 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a, *a_len += sizeof(struct nfp_fl_push_vlan); } else if (is_tcf_tunnel_set(a)) { struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a); - struct nfp_repr *repr = netdev_priv(netdev); - *tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a); + *tun_type = nfp_fl_get_tun_from_act_l4_port(app, a); if (*tun_type == NFP_FL_TUNNEL_NONE) return -EOPNOTSUPP; diff --git a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h index 29d673aa5277..15f41cfef9f1 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/cmsg.h +++ b/drivers/net/ethernet/netronome/nfp/flower/cmsg.h @@ -8,6 +8,7 @@ #include <linux/skbuff.h> #include <linux/types.h> #include <net/geneve.h> +#include <net/vxlan.h> #include "../nfp_app.h" #include "../nfpcore/nfp_cpp.h" @@ -65,8 +66,10 @@ #define NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL 6 #define NFP_FL_ACTION_OPCODE_SET_ETHERNET 7 #define NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS 9 +#define NFP_FL_ACTION_OPCODE_SET_IPV4_TTL_TOS 10 #define NFP_FL_ACTION_OPCODE_SET_IPV6_SRC 11 #define NFP_FL_ACTION_OPCODE_SET_IPV6_DST 12 +#define NFP_FL_ACTION_OPCODE_SET_IPV6_TC_HL_FL 13 #define NFP_FL_ACTION_OPCODE_SET_UDP 14 #define NFP_FL_ACTION_OPCODE_SET_TCP 15 #define NFP_FL_ACTION_OPCODE_PRE_LAG 16 @@ -82,6 +85,8 @@ #define NFP_FL_PUSH_VLAN_CFI BIT(12) #define NFP_FL_PUSH_VLAN_VID GENMASK(11, 0) +#define IPV6_FLOW_LABEL_MASK cpu_to_be32(0x000fffff) + /* LAG ports */ #define NFP_FL_LAG_OUT 0xC0DE0000 @@ -125,6 +130,26 @@ struct nfp_fl_set_ip4_addrs { __be32 ipv4_dst; }; +struct nfp_fl_set_ip4_ttl_tos { + struct nfp_fl_act_head head; + u8 ipv4_ttl_mask; + u8 ipv4_tos_mask; + u8 ipv4_ttl; + u8 ipv4_tos; + __be16 reserved; +}; + +struct nfp_fl_set_ipv6_tc_hl_fl { + struct nfp_fl_act_head head; + u8 ipv6_tc_mask; + u8 ipv6_hop_limit_mask; + __be16 reserved; + u8 ipv6_tc; + u8 ipv6_hop_limit; + __be32 ipv6_label_mask; + __be32 ipv6_label; +}; + struct nfp_fl_set_ipv6_addr { struct nfp_fl_act_head head; __be16 reserved; @@ -475,6 +500,32 @@ static inline int nfp_flower_cmsg_get_data_len(struct sk_buff *skb) return skb->len - NFP_FLOWER_CMSG_HLEN; } +static inline bool +nfp_fl_netdev_is_tunnel_type(struct net_device *netdev, + enum nfp_flower_tun_type tun_type) +{ + if (netif_is_vxlan(netdev)) + return tun_type == NFP_FL_TUNNEL_VXLAN; + if (netif_is_geneve(netdev)) + return tun_type == NFP_FL_TUNNEL_GENEVE; + + return false; +} + +static inline bool nfp_fl_is_netdev_to_offload(struct net_device *netdev) +{ + if (!netdev->rtnl_link_ops) + return false; + if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) + return true; + if (netif_is_vxlan(netdev)) + 
return true; + if (netif_is_geneve(netdev)) + return true; + + return false; +} + struct sk_buff * nfp_flower_cmsg_mac_repr_start(struct nfp_app *app, unsigned int num_ports); void diff --git a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c index 81dcf5b318ba..5db838f45694 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/lag_conf.c @@ -472,17 +472,25 @@ nfp_fl_lag_schedule_group_remove(struct nfp_fl_lag *lag, schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); } -static int +static void nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag, struct net_device *master) { struct nfp_fl_lag_group *group; + struct nfp_flower_priv *priv; + + priv = container_of(lag, struct nfp_flower_priv, nfp_lag); + + if (!netif_is_bond_master(master)) + return; mutex_lock(&lag->lock); group = nfp_fl_lag_find_group_for_master_with_lag(lag, master); if (!group) { mutex_unlock(&lag->lock); - return -ENOENT; + nfp_warn(priv->app->cpp, "untracked bond got unregistered %s\n", + netdev_name(master)); + return; } group->to_remove = true; @@ -490,7 +498,6 @@ nfp_fl_lag_schedule_group_delete(struct nfp_fl_lag *lag, mutex_unlock(&lag->lock); schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); - return 0; } static int @@ -575,7 +582,7 @@ nfp_fl_lag_changeupper_event(struct nfp_fl_lag *lag, return 0; } -static int +static void nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, struct netdev_notifier_changelowerstate_info *info) { @@ -586,18 +593,18 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, unsigned long *flags; if (!netif_is_lag_port(netdev) || !nfp_netdev_is_nfp_repr(netdev)) - return 0; + return; lag_lower_info = info->lower_state_info; if (!lag_lower_info) - return 0; + return; priv = container_of(lag, struct nfp_flower_priv, nfp_lag); repr = netdev_priv(netdev); /* Verify that the repr is associated with this app. */ if (repr->app != priv->app) - return 0; + return; repr_priv = repr->app_priv; flags = &repr_priv->lag_port_flags; @@ -617,20 +624,15 @@ nfp_fl_lag_changels_event(struct nfp_fl_lag *lag, struct net_device *netdev, mutex_unlock(&lag->lock); schedule_delayed_work(&lag->work, NFP_FL_LAG_DELAY); - return 0; } -static int -nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event, - void *ptr) +int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv, + struct net_device *netdev, + unsigned long event, void *ptr) { - struct net_device *netdev; - struct nfp_fl_lag *lag; + struct nfp_fl_lag *lag = &priv->nfp_lag; int err; - netdev = netdev_notifier_info_to_dev(ptr); - lag = container_of(nb, struct nfp_fl_lag, lag_nb); - switch (event) { case NETDEV_CHANGEUPPER: err = nfp_fl_lag_changeupper_event(lag, ptr); @@ -638,17 +640,11 @@ nfp_fl_lag_netdev_event(struct notifier_block *nb, unsigned long event, return NOTIFY_BAD; return NOTIFY_OK; case NETDEV_CHANGELOWERSTATE: - err = nfp_fl_lag_changels_event(lag, netdev, ptr); - if (err) - return NOTIFY_BAD; + nfp_fl_lag_changels_event(lag, netdev, ptr); return NOTIFY_OK; case NETDEV_UNREGISTER: - if (netif_is_bond_master(netdev)) { - err = nfp_fl_lag_schedule_group_delete(lag, netdev); - if (err) - return NOTIFY_BAD; - return NOTIFY_OK; - } + nfp_fl_lag_schedule_group_delete(lag, netdev); + return NOTIFY_OK; } return NOTIFY_DONE; @@ -673,8 +669,6 @@ void nfp_flower_lag_init(struct nfp_fl_lag *lag) /* 0 is a reserved batch version so increment to first valid value. 
*/ nfp_fl_increment_version(lag); - - lag->lag_nb.notifier_call = nfp_fl_lag_netdev_event; } void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag) diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c index 3a54728d2ea6..5059110a1768 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.c +++ b/drivers/net/ethernet/netronome/nfp/flower/main.c @@ -146,23 +146,12 @@ nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr) return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false); } -static int -nfp_flower_repr_netdev_init(struct nfp_app *app, struct net_device *netdev) -{ - return tc_setup_cb_egdev_register(netdev, - nfp_flower_setup_tc_egress_cb, - netdev_priv(netdev)); -} - static void nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev) { struct nfp_repr *repr = netdev_priv(netdev); kfree(repr->app_priv); - - tc_setup_cb_egdev_unregister(netdev, nfp_flower_setup_tc_egress_cb, - netdev_priv(netdev)); } static void @@ -568,6 +557,8 @@ static int nfp_flower_init(struct nfp_app *app) goto err_cleanup_metadata; } + INIT_LIST_HEAD(&app_priv->indr_block_cb_priv); + return 0; err_cleanup_metadata: @@ -661,10 +652,6 @@ static int nfp_flower_start(struct nfp_app *app) err = nfp_flower_lag_reset(&app_priv->nfp_lag); if (err) return err; - - err = register_netdevice_notifier(&app_priv->nfp_lag.lag_nb); - if (err) - return err; } return nfp_tunnel_config_start(app); @@ -672,12 +659,27 @@ static int nfp_flower_start(struct nfp_app *app) static void nfp_flower_stop(struct nfp_app *app) { + nfp_tunnel_config_stop(app); +} + +static int +nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev, + unsigned long event, void *ptr) +{ struct nfp_flower_priv *app_priv = app->priv; + int ret; - if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) - unregister_netdevice_notifier(&app_priv->nfp_lag.lag_nb); + if (app_priv->flower_ext_feats & NFP_FL_FEATS_LAG) { + ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr); + if (ret & NOTIFY_STOP_MASK) + return ret; + } - nfp_tunnel_config_stop(app); + ret = nfp_flower_reg_indir_block_handler(app, netdev, event); + if (ret & NOTIFY_STOP_MASK) + return ret; + + return nfp_tunnel_mac_event_handler(app, netdev, event, ptr); } const struct nfp_app_type app_flower = { @@ -698,7 +700,6 @@ const struct nfp_app_type app_flower = { .vnic_init = nfp_flower_vnic_init, .vnic_clean = nfp_flower_vnic_clean, - .repr_init = nfp_flower_repr_netdev_init, .repr_preclean = nfp_flower_repr_netdev_preclean, .repr_clean = nfp_flower_repr_netdev_clean, @@ -708,6 +709,8 @@ const struct nfp_app_type app_flower = { .start = nfp_flower_start, .stop = nfp_flower_stop, + .netdev_event = nfp_flower_netdev_event, + .ctrl_msg_rx = nfp_flower_cmsg_rx, .sriov_enable = nfp_flower_sriov_enable, diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h index 90045bab95bf..b858bac47621 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/main.h +++ b/drivers/net/ethernet/netronome/nfp/flower/main.h @@ -20,7 +20,6 @@ struct nfp_fl_pre_lag; struct net_device; struct nfp_app; -#define NFP_FL_STATS_CTX_DONT_CARE cpu_to_be32(0xffffffff) #define NFP_FL_STATS_ELEM_RS FIELD_SIZEOF(struct nfp_fl_stats_id, \ init_unalloc) #define NFP_FLOWER_MASK_ENTRY_RS 256 @@ -72,7 +71,6 @@ struct nfp_mtu_conf { /** * struct nfp_fl_lag - Flower APP priv data for link aggregation - * @lag_nb: Notifier to track master/slave events * @work: Work 
queue for writing configs to the HW * @lock: Lock to protect lag_group_list * @group_list: List of all master/slave groups offloaded @@ -85,7 +83,6 @@ struct nfp_mtu_conf { * retransmission */ struct nfp_fl_lag { - struct notifier_block lag_nb; struct delayed_work work; struct mutex lock; struct list_head group_list; @@ -126,13 +123,13 @@ struct nfp_fl_lag { * @nfp_neigh_off_lock: Lock for the neighbour address list * @nfp_mac_off_ids: IDA to manage id assignment for offloaded macs * @nfp_mac_off_count: Number of MACs in address list - * @nfp_tun_mac_nb: Notifier to monitor link state * @nfp_tun_neigh_nb: Notifier to monitor neighbour state * @reify_replies: atomically stores the number of replies received * from firmware for repr reify * @reify_wait_queue: wait queue for repr reify response counting * @mtu_conf: Configuration of repr MTU value * @nfp_lag: Link aggregation data block + * @indr_block_cb_priv: List of priv data passed to indirect block cbs */ struct nfp_flower_priv { struct nfp_app *app; @@ -160,12 +157,12 @@ struct nfp_flower_priv { spinlock_t nfp_neigh_off_lock; struct ida nfp_mac_off_ids; int nfp_mac_off_count; - struct notifier_block nfp_tun_mac_nb; struct notifier_block nfp_tun_neigh_nb; atomic_t reify_replies; wait_queue_head_t reify_wait_queue; struct nfp_mtu_conf mtu_conf; struct nfp_fl_lag nfp_lag; + struct list_head indr_block_cb_priv; }; /** @@ -209,7 +206,6 @@ struct nfp_fl_payload { char *unmasked_data; char *mask_data; char *action_data; - bool ingress_offload; }; extern const struct rhashtable_params nfp_flower_table_params; @@ -226,7 +222,8 @@ void nfp_flower_metadata_cleanup(struct nfp_app *app); int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, enum tc_setup_type type, void *type_data); -int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_flow_match(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, @@ -244,7 +241,7 @@ int nfp_modify_flow_metadata(struct nfp_app *app, struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, - struct net_device *netdev, __be32 host_ctx); + struct net_device *netdev); struct nfp_fl_payload * nfp_flower_remove_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie); @@ -252,21 +249,28 @@ void nfp_flower_rx_flow_stats(struct nfp_app *app, struct sk_buff *skb); int nfp_tunnel_config_start(struct nfp_app *app); void nfp_tunnel_config_stop(struct nfp_app *app); +int nfp_tunnel_mac_event_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event, void *ptr); void nfp_tunnel_write_macs(struct nfp_app *app); void nfp_tunnel_del_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_add_ipv4_off(struct nfp_app *app, __be32 ipv4); void nfp_tunnel_request_route(struct nfp_app *app, struct sk_buff *skb); void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb); -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, - void *cb_priv); void nfp_flower_lag_init(struct nfp_fl_lag *lag); void nfp_flower_lag_cleanup(struct nfp_fl_lag *lag); int nfp_flower_lag_reset(struct nfp_fl_lag *lag); +int nfp_flower_lag_netdev_event(struct nfp_flower_priv *priv, + struct net_device *netdev, + unsigned long event, void *ptr); bool nfp_flower_lag_unprocessed_msg(struct nfp_app *app, struct sk_buff *skb); int nfp_flower_lag_populate_pre_action(struct nfp_app *app, struct net_device 
*master, struct nfp_fl_pre_lag *pre_act); int nfp_flower_lag_get_output_id(struct nfp_app *app, struct net_device *master); +int nfp_flower_reg_indir_block_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event); #endif diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c index e54fb6034326..cdf75595f627 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/match.c +++ b/drivers/net/ethernet/netronome/nfp/flower/match.c @@ -52,10 +52,13 @@ nfp_flower_compile_port(struct nfp_flower_in_port *frame, u32 cmsg_port, return 0; } - if (tun_type) + if (tun_type) { frame->in_port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type); - else + } else { + if (!cmsg_port) + return -EOPNOTSUPP; frame->in_port = cpu_to_be32(cmsg_port); + } return 0; } @@ -289,17 +292,21 @@ nfp_flower_compile_ipv4_udp_tun(struct nfp_flower_ipv4_udp_tun *frame, } } -int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, +int nfp_flower_compile_flow_match(struct nfp_app *app, + struct tc_cls_flower_offload *flow, struct nfp_fl_key_ls *key_ls, struct net_device *netdev, struct nfp_fl_payload *nfp_flow, enum nfp_flower_tun_type tun_type) { - struct nfp_repr *netdev_repr; + u32 cmsg_port = 0; int err; u8 *ext; u8 *msk; + if (nfp_netdev_is_nfp_repr(netdev)) + cmsg_port = nfp_repr_get_port_id(netdev); + memset(nfp_flow->unmasked_data, 0, key_ls->key_size); memset(nfp_flow->mask_data, 0, key_ls->key_size); @@ -327,15 +334,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, /* Populate Exact Port data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)ext, - nfp_repr_get_port_id(netdev), - false, tun_type); + cmsg_port, false, tun_type); if (err) return err; /* Populate Mask Port Data. */ err = nfp_flower_compile_port((struct nfp_flower_in_port *)msk, - nfp_repr_get_port_id(netdev), - true, tun_type); + cmsg_port, true, tun_type); if (err) return err; @@ -399,16 +404,13 @@ int nfp_flower_compile_flow_match(struct tc_cls_flower_offload *flow, msk += sizeof(struct nfp_flower_ipv4_udp_tun); /* Configure tunnel end point MAC. */ - if (nfp_netdev_is_nfp_repr(netdev)) { - netdev_repr = netdev_priv(netdev); - nfp_tunnel_write_macs(netdev_repr->app); - - /* Store the tunnel destination in the rule data. - * This must be present and be an exact match. - */ - nfp_flow->nfp_tun_ipv4_addr = tun_dst; - nfp_tunnel_add_ipv4_off(netdev_repr->app, tun_dst); - } + nfp_tunnel_write_macs(app); + + /* Store the tunnel destination in the rule data. + * This must be present and be an exact match. 
+ */ + nfp_flow->nfp_tun_ipv4_addr = tun_dst; + nfp_tunnel_add_ipv4_off(app, tun_dst); if (key_ls->key_layer_two & NFP_FLOWER_LAYER2_GENEVE_OP) { err = nfp_flower_compile_geneve_opt(ext, flow, false); diff --git a/drivers/net/ethernet/netronome/nfp/flower/metadata.c b/drivers/net/ethernet/netronome/nfp/flower/metadata.c index 48729bf171e0..573a4400a26c 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/metadata.c +++ b/drivers/net/ethernet/netronome/nfp/flower/metadata.c @@ -21,7 +21,6 @@ struct nfp_mask_id_table { struct nfp_fl_flow_table_cmp_arg { struct net_device *netdev; unsigned long cookie; - __be32 host_ctx; }; static int nfp_release_stats_entry(struct nfp_app *app, u32 stats_context_id) @@ -76,14 +75,13 @@ static int nfp_get_stats_entry(struct nfp_app *app, u32 *stats_context_id) /* Must be called with either RTNL or rcu_read_lock */ struct nfp_fl_payload * nfp_flower_search_fl_table(struct nfp_app *app, unsigned long tc_flower_cookie, - struct net_device *netdev, __be32 host_ctx) + struct net_device *netdev) { struct nfp_fl_flow_table_cmp_arg flower_cmp_arg; struct nfp_flower_priv *priv = app->priv; flower_cmp_arg.netdev = netdev; flower_cmp_arg.cookie = tc_flower_cookie; - flower_cmp_arg.host_ctx = host_ctx; return rhashtable_lookup_fast(&priv->flow_table, &flower_cmp_arg, nfp_flower_table_params); @@ -287,6 +285,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, nfp_flow->meta.host_ctx_id = cpu_to_be32(stats_cxt); nfp_flow->meta.host_cookie = cpu_to_be64(flow->cookie); + nfp_flow->ingress_dev = netdev; new_mask_id = 0; if (!nfp_check_mask_add(app, nfp_flow->mask_data, @@ -306,8 +305,7 @@ int nfp_compile_flow_metadata(struct nfp_app *app, priv->stats[stats_cxt].bytes = 0; priv->stats[stats_cxt].used = jiffies; - check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev, - NFP_FL_STATS_CTX_DONT_CARE); + check_entry = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (check_entry) { if (nfp_release_stats_entry(app, stats_cxt)) return -EINVAL; @@ -352,9 +350,7 @@ static int nfp_fl_obj_cmpfn(struct rhashtable_compare_arg *arg, const struct nfp_fl_flow_table_cmp_arg *cmp_arg = arg->key; const struct nfp_fl_payload *flow_entry = obj; - if ((!cmp_arg->netdev || flow_entry->ingress_dev == cmp_arg->netdev) && - (cmp_arg->host_ctx == NFP_FL_STATS_CTX_DONT_CARE || - flow_entry->meta.host_ctx_id == cmp_arg->host_ctx)) + if (flow_entry->ingress_dev == cmp_arg->netdev) return flow_entry->tc_flower_cookie != cmp_arg->cookie; return 1; diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c index 67e576fe7fc0..2cdbf29ecbe7 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/offload.c +++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c @@ -56,11 +56,10 @@ BIT(FLOW_DISSECTOR_KEY_ENC_PORTS)) static int -nfp_flower_xmit_flow(struct net_device *netdev, - struct nfp_fl_payload *nfp_flow, u8 mtype) +nfp_flower_xmit_flow(struct nfp_app *app, struct nfp_fl_payload *nfp_flow, + u8 mtype) { u32 meta_len, key_len, mask_len, act_len, tot_len; - struct nfp_repr *priv = netdev_priv(netdev); struct sk_buff *skb; unsigned char *msg; @@ -78,7 +77,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ; nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ; - skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL); + skb = nfp_flower_cmsg_alloc(app, tot_len, mtype, GFP_KERNEL); if (!skb) return -ENOMEM; @@ -96,7 +95,7 @@ nfp_flower_xmit_flow(struct net_device *netdev, 
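
[The nfp_fl_set_ip4()/nfp_fl_set_ip6() hunks earlier in this patch fold every pedit key into one hardware action with the same read-modify-write rule: widen the running mask, clear the bits being rewritten, then merge the new masked value. A minimal sketch of that rule, assuming nothing beyond the kernel's __be32 type — the helper name is illustrative, not driver code:

/* Fold one pedit (exact, mask) pair into an accumulated value/mask,
 * as the nfp_fl_set_ip4()/nfp_fl_set_ip6() cases do per 32-bit word.
 */
static inline void pedit_accumulate(__be32 *val, __be32 *msk,
				    __be32 exact, __be32 mask)
{
	*msk |= mask;		/* grow the set of bits being rewritten */
	*val &= ~mask;		/* drop any stale value for those bits  */
	*val |= exact & mask;	/* merge in the new masked value        */
}

Because the mask only ever grows, a later pedit key can overwrite bytes an earlier key set without leaving stale bits behind.]
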
nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ; nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ; - nfp_ctrl_tx(priv->app->ctrl, skb); + nfp_ctrl_tx(app->ctrl, skb); return 0; } @@ -129,9 +128,9 @@ nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts, static int nfp_flower_calculate_key_layers(struct nfp_app *app, + struct net_device *netdev, struct nfp_fl_key_ls *ret_key_ls, struct tc_cls_flower_offload *flow, - bool egress, enum nfp_flower_tun_type *tun_type) { struct flow_dissector_key_basic *mask_basic = NULL; @@ -187,8 +186,6 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, skb_flow_dissector_target(flow->dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL, flow->key); - if (!egress) - return -EOPNOTSUPP; if (mask_enc_ctl->addr_type != 0xffff || enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS) @@ -251,9 +248,10 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, default: return -EOPNOTSUPP; } - } else if (egress) { - /* Reject non tunnel matches offloaded to egress repr. */ - return -EOPNOTSUPP; + + /* Ensure the ingress netdev matches the expected tun type. */ + if (!nfp_fl_netdev_is_tunnel_type(netdev, *tun_type)) + return -EOPNOTSUPP; } if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) { @@ -390,7 +388,7 @@ nfp_flower_calculate_key_layers(struct nfp_app *app, } static struct nfp_fl_payload * -nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) +nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer) { struct nfp_fl_payload *flow_pay; @@ -414,7 +412,6 @@ nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress) flow_pay->nfp_tun_ipv4_addr = 0; flow_pay->meta.flags = 0; - flow_pay->ingress_offload = !egress; return flow_pay; @@ -432,7 +429,6 @@ err_free_flow: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure. - * @egress: NFP netdev is the egress. * * Adds a new flow to the repeated hash structure and action payload. * @@ -440,46 +436,35 @@ err_free_flow: */ static int nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE; - struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *flow_pay; struct nfp_fl_key_ls *key_layer; - struct net_device *ingr_dev; + struct nfp_port *port = NULL; int err; - ingr_dev = egress ? NULL : netdev; - flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); - if (flow_pay) { - /* Ignore as duplicate if it has been added by different cb. */ - if (flow_pay->ingress_offload && egress) - return 0; - else - return -EOPNOTSUPP; - } + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL); if (!key_layer) return -ENOMEM; - err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress, + err = nfp_flower_calculate_key_layers(app, netdev, key_layer, flow, &tun_type); if (err) goto err_free_key_ls; - flow_pay = nfp_flower_allocate_new(key_layer, egress); + flow_pay = nfp_flower_allocate_new(key_layer); if (!flow_pay) { err = -ENOMEM; goto err_free_key_ls; } - flow_pay->ingress_dev = egress ? 
NULL : netdev; - - err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay, - tun_type); + err = nfp_flower_compile_flow_match(app, flow, key_layer, netdev, + flow_pay, tun_type); if (err) goto err_destroy_flow; @@ -487,8 +472,7 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_destroy_flow; - err = nfp_compile_flow_metadata(app, flow, flow_pay, - flow_pay->ingress_dev); + err = nfp_compile_flow_metadata(app, flow, flow_pay, netdev); if (err) goto err_destroy_flow; @@ -498,12 +482,13 @@ nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev, if (err) goto err_release_metadata; - err = nfp_flower_xmit_flow(netdev, flow_pay, + err = nfp_flower_xmit_flow(app, flow_pay, NFP_FLOWER_CMSG_TYPE_FLOW_ADD); if (err) goto err_remove_rhash; - port->tc_offload_cnt++; + if (port) + port->tc_offload_cnt++; /* Deallocate flow payload when flower rule has been destroyed. */ kfree(key_layer); @@ -531,7 +516,6 @@ err_free_key_ls: * @app: Pointer to the APP handle * @netdev: netdev structure. * @flow: TC flower classifier offload structure - * @egress: Netdev is the egress dev. * * Removes a flow from the repeated hash structure and clears the * action payload. @@ -540,19 +524,19 @@ err_free_key_ls: */ static int nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { - struct nfp_port *port = nfp_port_from_netdev(netdev); struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; - struct net_device *ingr_dev; + struct nfp_port *port = NULL; int err; - ingr_dev = egress ? NULL : netdev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); + if (nfp_netdev_is_nfp_repr(netdev)) + port = nfp_port_from_netdev(netdev); + + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) - return egress ? 0 : -ENOENT; + return -ENOENT; err = nfp_modify_flow_metadata(app, nfp_flow); if (err) @@ -561,13 +545,14 @@ nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev, if (nfp_flow->nfp_tun_ipv4_addr) nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr); - err = nfp_flower_xmit_flow(netdev, nfp_flow, + err = nfp_flower_xmit_flow(app, nfp_flow, NFP_FLOWER_CMSG_TYPE_FLOW_DEL); if (err) goto err_free_flow; err_free_flow: - port->tc_offload_cnt--; + if (port) + port->tc_offload_cnt--; kfree(nfp_flow->action_data); kfree(nfp_flow->mask_data); kfree(nfp_flow->unmasked_data); @@ -583,7 +568,6 @@ err_free_flow: * @app: Pointer to the APP handle * @netdev: Netdev structure. * @flow: TC flower classifier offload structure - * @egress: Netdev is the egress dev. * * Populates a flow statistics structure which which corresponds to a * specific flow. @@ -592,22 +576,16 @@ err_free_flow: */ static int nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flow, bool egress) + struct tc_cls_flower_offload *flow) { struct nfp_flower_priv *priv = app->priv; struct nfp_fl_payload *nfp_flow; - struct net_device *ingr_dev; u32 ctx_id; - ingr_dev = egress ? 
NULL : netdev; - nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev, - NFP_FL_STATS_CTX_DONT_CARE); + nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, netdev); if (!nfp_flow) return -EINVAL; - if (nfp_flow->ingress_offload && egress) - return 0; - ctx_id = be32_to_cpu(nfp_flow->meta.host_ctx_id); spin_lock_bh(&priv->stats_lock); @@ -624,35 +602,18 @@ nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev, static int nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev, - struct tc_cls_flower_offload *flower, bool egress) + struct tc_cls_flower_offload *flower) { if (!eth_proto_is_802_3(flower->common.protocol)) return -EOPNOTSUPP; switch (flower->command) { case TC_CLSFLOWER_REPLACE: - return nfp_flower_add_offload(app, netdev, flower, egress); + return nfp_flower_add_offload(app, netdev, flower); case TC_CLSFLOWER_DESTROY: - return nfp_flower_del_offload(app, netdev, flower, egress); + return nfp_flower_del_offload(app, netdev, flower); case TC_CLSFLOWER_STATS: - return nfp_flower_get_stats(app, netdev, flower, egress); - default: - return -EOPNOTSUPP; - } -} - -int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data, - void *cb_priv) -{ - struct nfp_repr *repr = cb_priv; - - if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data)) - return -EOPNOTSUPP; - - switch (type) { - case TC_SETUP_CLSFLOWER: - return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data, true); + return nfp_flower_get_stats(app, netdev, flower); default: return -EOPNOTSUPP; } @@ -669,7 +630,7 @@ static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type, switch (type) { case TC_SETUP_CLSFLOWER: return nfp_flower_repr_offload(repr->app, repr->netdev, - type_data, false); + type_data); default: return -EOPNOTSUPP; } @@ -708,3 +669,130 @@ int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev, return -EOPNOTSUPP; } } + +struct nfp_flower_indr_block_cb_priv { + struct net_device *netdev; + struct nfp_app *app; + struct list_head list; +}; + +static struct nfp_flower_indr_block_cb_priv * +nfp_flower_indr_block_cb_priv_lookup(struct nfp_app *app, + struct net_device *netdev) +{ + struct nfp_flower_indr_block_cb_priv *cb_priv; + struct nfp_flower_priv *priv = app->priv; + + /* All callback list access should be protected by RTNL. 
*/ + ASSERT_RTNL(); + + list_for_each_entry(cb_priv, &priv->indr_block_cb_priv, list) + if (cb_priv->netdev == netdev) + return cb_priv; + + return NULL; +} + +static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, + void *type_data, void *cb_priv) +{ + struct nfp_flower_indr_block_cb_priv *priv = cb_priv; + struct tc_cls_flower_offload *flower = type_data; + + if (flower->common.chain_index) + return -EOPNOTSUPP; + + switch (type) { + case TC_SETUP_CLSFLOWER: + return nfp_flower_repr_offload(priv->app, priv->netdev, + type_data); + default: + return -EOPNOTSUPP; + } +} + +static int +nfp_flower_setup_indr_tc_block(struct net_device *netdev, struct nfp_app *app, + struct tc_block_offload *f) +{ + struct nfp_flower_indr_block_cb_priv *cb_priv; + struct nfp_flower_priv *priv = app->priv; + int err; + + if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS) + return -EOPNOTSUPP; + + switch (f->command) { + case TC_BLOCK_BIND: + cb_priv = kmalloc(sizeof(*cb_priv), GFP_KERNEL); + if (!cb_priv) + return -ENOMEM; + + cb_priv->netdev = netdev; + cb_priv->app = app; + list_add(&cb_priv->list, &priv->indr_block_cb_priv); + + err = tcf_block_cb_register(f->block, + nfp_flower_setup_indr_block_cb, + cb_priv, cb_priv, f->extack); + if (err) { + list_del(&cb_priv->list); + kfree(cb_priv); + } + + return err; + case TC_BLOCK_UNBIND: + cb_priv = nfp_flower_indr_block_cb_priv_lookup(app, netdev); + if (!cb_priv) + return -ENOENT; + + tcf_block_cb_unregister(f->block, + nfp_flower_setup_indr_block_cb, + cb_priv); + list_del(&cb_priv->list); + kfree(cb_priv); + + return 0; + default: + return -EOPNOTSUPP; + } + return 0; +} + +static int +nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv, + enum tc_setup_type type, void *type_data) +{ + switch (type) { + case TC_SETUP_BLOCK: + return nfp_flower_setup_indr_tc_block(netdev, cb_priv, + type_data); + default: + return -EOPNOTSUPP; + } +} + +int nfp_flower_reg_indir_block_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event) +{ + int err; + + if (!nfp_fl_is_netdev_to_offload(netdev)) + return NOTIFY_OK; + + if (event == NETDEV_REGISTER) { + err = __tc_indr_block_cb_register(netdev, app, + nfp_flower_indr_setup_tc_cb, + app); + if (err) + nfp_flower_cmsg_warn(app, + "Indirect block reg failed - %s\n", + netdev->name); + } else if (event == NETDEV_UNREGISTER) { + __tc_indr_block_cb_unregister(netdev, + nfp_flower_indr_setup_tc_cb, app); + } + + return NOTIFY_OK; +} diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c index 8e5bec04d1f9..2d9f26a725c2 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c +++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c @@ -4,7 +4,6 @@ #include <linux/etherdevice.h> #include <linux/inetdevice.h> #include <net/netevent.h> -#include <net/vxlan.h> #include <linux/idr.h> #include <net/dst_metadata.h> #include <net/arp.h> @@ -182,18 +181,6 @@ void nfp_tunnel_keep_alive(struct nfp_app *app, struct sk_buff *skb) } } -static bool nfp_tun_is_netdev_to_offload(struct net_device *netdev) -{ - if (!netdev->rtnl_link_ops) - return false; - if (!strcmp(netdev->rtnl_link_ops->kind, "openvswitch")) - return true; - if (netif_is_vxlan(netdev)) - return true; - - return false; -} - static int nfp_flower_xmit_tun_conf(struct nfp_app *app, u8 mtype, u16 plen, void *pdata, gfp_t flag) @@ -615,7 +602,7 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, if 
(nfp_netdev_is_nfp_repr(netdev)) port = nfp_repr_get_port_id(netdev); - else if (!nfp_tun_is_netdev_to_offload(netdev)) + else if (!nfp_fl_is_netdev_to_offload(netdev)) return; entry = kmalloc(sizeof(*entry), GFP_KERNEL); @@ -652,29 +639,16 @@ static void nfp_tun_add_to_mac_offload_list(struct net_device *netdev, mutex_unlock(&priv->nfp_mac_off_lock); } -static int nfp_tun_mac_event_handler(struct notifier_block *nb, - unsigned long event, void *ptr) +int nfp_tunnel_mac_event_handler(struct nfp_app *app, + struct net_device *netdev, + unsigned long event, void *ptr) { - struct nfp_flower_priv *app_priv; - struct net_device *netdev; - struct nfp_app *app; - if (event == NETDEV_DOWN || event == NETDEV_UNREGISTER) { - app_priv = container_of(nb, struct nfp_flower_priv, - nfp_tun_mac_nb); - app = app_priv->app; - netdev = netdev_notifier_info_to_dev(ptr); - /* If non-nfp netdev then free its offload index. */ - if (nfp_tun_is_netdev_to_offload(netdev)) + if (nfp_fl_is_netdev_to_offload(netdev)) nfp_tun_del_mac_idx(app, netdev->ifindex); } else if (event == NETDEV_UP || event == NETDEV_CHANGEADDR || event == NETDEV_REGISTER) { - app_priv = container_of(nb, struct nfp_flower_priv, - nfp_tun_mac_nb); - app = app_priv->app; - netdev = netdev_notifier_info_to_dev(ptr); - nfp_tun_add_to_mac_offload_list(netdev, app); /* Force a list write to keep NFP up to date. */ @@ -686,14 +660,11 @@ static int nfp_tun_mac_event_handler(struct notifier_block *nb, int nfp_tunnel_config_start(struct nfp_app *app) { struct nfp_flower_priv *priv = app->priv; - struct net_device *netdev; - int err; /* Initialise priv data for MAC offloading. */ priv->nfp_mac_off_count = 0; mutex_init(&priv->nfp_mac_off_lock); INIT_LIST_HEAD(&priv->nfp_mac_off_list); - priv->nfp_tun_mac_nb.notifier_call = nfp_tun_mac_event_handler; mutex_init(&priv->nfp_mac_index_lock); INIT_LIST_HEAD(&priv->nfp_mac_index_list); ida_init(&priv->nfp_mac_off_ids); @@ -707,27 +678,7 @@ int nfp_tunnel_config_start(struct nfp_app *app) INIT_LIST_HEAD(&priv->nfp_neigh_off_list); priv->nfp_tun_neigh_nb.notifier_call = nfp_tun_neigh_event_handler; - err = register_netdevice_notifier(&priv->nfp_tun_mac_nb); - if (err) - goto err_free_mac_ida; - - err = register_netevent_notifier(&priv->nfp_tun_neigh_nb); - if (err) - goto err_unreg_mac_nb; - - /* Parse netdevs already registered for MACs that need offloaded. */ - rtnl_lock(); - for_each_netdev(&init_net, netdev) - nfp_tun_add_to_mac_offload_list(netdev, app); - rtnl_unlock(); - - return 0; - -err_unreg_mac_nb: - unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); -err_free_mac_ida: - ida_destroy(&priv->nfp_mac_off_ids); - return err; + return register_netevent_notifier(&priv->nfp_tun_neigh_nb); } void nfp_tunnel_config_stop(struct nfp_app *app) @@ -739,7 +690,6 @@ void nfp_tunnel_config_stop(struct nfp_app *app) struct nfp_ipv4_addr_entry *ip_entry; struct list_head *ptr, *storage; - unregister_netdevice_notifier(&priv->nfp_tun_mac_nb); unregister_netevent_notifier(&priv->nfp_tun_neigh_nb); /* Free any memory that may be occupied by MAC list. 
*/ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.c b/drivers/net/ethernet/netronome/nfp/nfp_app.c index 68a0991aac22..3a973282b2bb 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.c @@ -131,11 +131,100 @@ nfp_app_reprs_set(struct nfp_app *app, enum nfp_repr_type type, struct nfp_reprs *old; old = nfp_reprs_get_locked(app, type); + rtnl_lock(); rcu_assign_pointer(app->reprs[type], reprs); + rtnl_unlock(); return old; } +static void +nfp_app_netdev_feat_change(struct nfp_app *app, struct net_device *netdev) +{ + struct nfp_net *nn; + unsigned int type; + + if (!nfp_netdev_is_nfp_net(netdev)) + return; + nn = netdev_priv(netdev); + if (nn->app != app) + return; + + for (type = 0; type < __NFP_REPR_TYPE_MAX; type++) { + struct nfp_reprs *reprs; + unsigned int i; + + reprs = rtnl_dereference(app->reprs[type]); + if (!reprs) + continue; + + for (i = 0; i < reprs->num_reprs; i++) { + struct net_device *repr; + + repr = rtnl_dereference(reprs->reprs[i]); + if (!repr) + continue; + + nfp_repr_transfer_features(repr, netdev); + } + } +} + +static int +nfp_app_netdev_event(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct net_device *netdev; + struct nfp_app *app; + + netdev = netdev_notifier_info_to_dev(ptr); + app = container_of(nb, struct nfp_app, netdev_nb); + + /* Handle events common code is interested in */ + switch (event) { + case NETDEV_FEAT_CHANGE: + nfp_app_netdev_feat_change(app, netdev); + break; + } + + /* Call offload specific handlers */ + if (app->type->netdev_event) + return app->type->netdev_event(app, netdev, event, ptr); + return NOTIFY_DONE; +} + +int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) +{ + int err; + + app->ctrl = ctrl; + + if (app->type->start) { + err = app->type->start(app); + if (err) + return err; + } + + app->netdev_nb.notifier_call = nfp_app_netdev_event; + err = register_netdevice_notifier(&app->netdev_nb); + if (err) + goto err_app_stop; + + return 0; + +err_app_stop: + if (app->type->stop) + app->type->stop(app); + return err; +} + +void nfp_app_stop(struct nfp_app *app) +{ + unregister_netdevice_notifier(&app->netdev_nb); + + if (app->type->stop) + app->type->stop(app); +} + struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id) { struct nfp_app *app; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_app.h b/drivers/net/ethernet/netronome/nfp/nfp_app.h index 4d6ecf99b1cc..d578d856a009 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_app.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_app.h @@ -69,6 +69,7 @@ extern const struct nfp_app_type app_abm; * @port_get_stats_strings: get strings for extra statistics * @start: start application logic * @stop: stop application logic + * @netdev_event: Netdevice notifier event * @ctrl_msg_rx: control message handler * @ctrl_msg_rx_raw: handler for control messages from data queues * @setup_tc: setup TC ndo @@ -122,6 +123,9 @@ struct nfp_app_type { int (*start)(struct nfp_app *app); void (*stop)(struct nfp_app *app); + int (*netdev_event)(struct nfp_app *app, struct net_device *netdev, + unsigned long event, void *ptr); + void (*ctrl_msg_rx)(struct nfp_app *app, struct sk_buff *skb); void (*ctrl_msg_rx_raw)(struct nfp_app *app, const void *data, unsigned int len); @@ -151,6 +155,7 @@ struct nfp_app_type { * @reprs: array of pointers to representors * @type: pointer to const application ops and info * @ctrl_mtu: MTU to set on the control vNIC (set in .init()) + * @netdev_nb: Netdevice notifier block * 
@priv: app-specific priv data */ struct nfp_app { @@ -163,6 +168,9 @@ struct nfp_app { const struct nfp_app_type *type; unsigned int ctrl_mtu; + + struct notifier_block netdev_nb; + void *priv; }; @@ -264,21 +272,6 @@ nfp_app_repr_change_mtu(struct nfp_app *app, struct net_device *netdev, return app->type->repr_change_mtu(app, netdev, new_mtu); } -static inline int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl) -{ - app->ctrl = ctrl; - if (!app->type->start) - return 0; - return app->type->start(app); -} - -static inline void nfp_app_stop(struct nfp_app *app) -{ - if (!app->type->stop) - return; - app->type->stop(app); -} - static inline const char *nfp_app_name(struct nfp_app *app) { if (!app) @@ -430,6 +423,8 @@ nfp_app_ctrl_msg_alloc(struct nfp_app *app, unsigned int size, gfp_t priority); struct nfp_app *nfp_app_alloc(struct nfp_pf *pf, enum nfp_app_id id); void nfp_app_free(struct nfp_app *app); +int nfp_app_start(struct nfp_app *app, struct nfp_net *ctrl); +void nfp_app_stop(struct nfp_app *app); /* Callbacks shared between apps */ diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h index 6f0c37d09256..be37c2d6151c 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h @@ -158,6 +158,7 @@ struct nfp_net_tx_desc { __le16 data_len; /* Length of frame + meta data */ } __packed; __le32 vals[4]; + __le64 vals8[2]; }; }; @@ -543,6 +544,7 @@ struct nfp_net_dp { * @reconfig_timer_active: Timer for reading reconfiguration results is pending * @reconfig_sync_present: Some thread is performing synchronous reconfig * @reconfig_timer: Timer for async reading of reconfig results + * @reconfig_in_progress_update: Update FW is processing now (debug only) * @link_up: Is the link up? * @link_status_lock: Protects @link_* and ensures atomicity with BAR reading * @rx_coalesce_usecs: RX interrupt moderation usecs delay parameter @@ -611,6 +613,7 @@ struct nfp_net { bool reconfig_timer_active; bool reconfig_sync_present; struct timer_list reconfig_timer; + u32 reconfig_in_progress_update; u32 rx_coalesce_usecs; u32 rx_coalesce_max_frames; @@ -851,7 +854,7 @@ void nfp_net_get_fw_version(struct nfp_net_fw_version *fw_ver, void __iomem *ctrl_bar); struct nfp_net * -nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, +nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, unsigned int max_tx_rings, unsigned int max_rx_rings); void nfp_net_free(struct nfp_net *nn); @@ -868,6 +871,7 @@ unsigned int nfp_net_rss_key_sz(struct nfp_net *nn); void nfp_net_rss_write_itbl(struct nfp_net *nn); void nfp_net_rss_write_key(struct nfp_net *nn); void nfp_net_coalesce_write_cfg(struct nfp_net *nn); +int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd); unsigned int nfp_net_irqs_alloc(struct pci_dev *pdev, struct msix_entry *irq_entries, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c index 6bddfcfdec34..e97636d2e6ee 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c @@ -101,6 +101,7 @@ static void nfp_net_reconfig_start(struct nfp_net *nn, u32 update) /* ensure update is written before pinging HW */ nn_pci_flush(nn); nfp_qcp_wr_ptr_add(nn->qcp_cfg, 1); + nn->reconfig_in_progress_update = update; } /* Pass 0 as update to run posted reconfigs. 
*/ @@ -123,10 +124,14 @@ static bool nfp_net_reconfig_check_done(struct nfp_net *nn, bool last_check) if (reg == 0) return true; if (reg & NFP_NET_CFG_UPDATE_ERR) { - nn_err(nn, "Reconfig error: 0x%08x\n", reg); + nn_err(nn, "Reconfig error (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n", + reg, nn->reconfig_in_progress_update, + nn_readl(nn, NFP_NET_CFG_CTRL)); return true; } else if (last_check) { - nn_err(nn, "Reconfig timeout: 0x%08x\n", reg); + nn_err(nn, "Reconfig timeout (status: 0x%08x update: 0x%08x ctrl: 0x%08x)\n", + reg, nn->reconfig_in_progress_update, + nn_readl(nn, NFP_NET_CFG_CTRL)); return true; } @@ -279,7 +284,7 @@ int nfp_net_reconfig(struct nfp_net *nn, u32 update) * * Return: Negative errno on error, 0 on success */ -static int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) +int nfp_net_reconfig_mbox(struct nfp_net *nn, u32 mbox_cmd) { u32 mbox = nn->tlv_caps.mbox_off; int ret; @@ -647,27 +652,29 @@ static void nfp_net_tx_ring_stop(struct netdev_queue *nd_q, * @txbuf: Pointer to driver soft TX descriptor * @txd: Pointer to HW TX descriptor * @skb: Pointer to SKB + * @md_bytes: Prepend length * * Set up Tx descriptor for LSO, do nothing for non-LSO skbs. * Return error on packet header greater than maximum supported LSO header size. */ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, struct nfp_net_tx_buf *txbuf, - struct nfp_net_tx_desc *txd, struct sk_buff *skb) + struct nfp_net_tx_desc *txd, struct sk_buff *skb, + u32 md_bytes) { - u32 hdrlen; + u32 l3_offset, l4_offset, hdrlen; u16 mss; if (!skb_is_gso(skb)) return; if (!skb->encapsulation) { - txd->l3_offset = skb_network_offset(skb); - txd->l4_offset = skb_transport_offset(skb); + l3_offset = skb_network_offset(skb); + l4_offset = skb_transport_offset(skb); hdrlen = skb_transport_offset(skb) + tcp_hdrlen(skb); } else { - txd->l3_offset = skb_inner_network_offset(skb); - txd->l4_offset = skb_inner_transport_offset(skb); + l3_offset = skb_inner_network_offset(skb); + l4_offset = skb_inner_transport_offset(skb); hdrlen = skb_inner_transport_header(skb) - skb->data + inner_tcp_hdrlen(skb); } @@ -676,7 +683,9 @@ static void nfp_net_tx_tso(struct nfp_net_r_vector *r_vec, txbuf->real_len += hdrlen * (txbuf->pkt_cnt - 1); mss = skb_shinfo(skb)->gso_size & PCIE_DESC_TX_MSS_MASK; - txd->lso_hdrlen = hdrlen; + txd->l3_offset = l3_offset - md_bytes; + txd->l4_offset = l4_offset - md_bytes; + txd->lso_hdrlen = hdrlen - md_bytes; txd->mss = cpu_to_le16(mss); txd->flags |= PCIE_DESC_TX_LSO; @@ -786,11 +795,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) { struct nfp_net *nn = netdev_priv(netdev); const struct skb_frag_struct *frag; - struct nfp_net_tx_desc *txd, txdg; int f, nr_frags, wr_idx, md_bytes; struct nfp_net_tx_ring *tx_ring; struct nfp_net_r_vector *r_vec; struct nfp_net_tx_buf *txbuf; + struct nfp_net_tx_desc *txd; struct netdev_queue *nd_q; struct nfp_net_dp *dp; dma_addr_t dma_addr; @@ -801,13 +810,13 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) qidx = skb_get_queue_mapping(skb); tx_ring = &dp->tx_rings[qidx]; r_vec = tx_ring->r_vec; - nd_q = netdev_get_tx_queue(dp->netdev, qidx); nr_frags = skb_shinfo(skb)->nr_frags; if (unlikely(nfp_net_tx_full(tx_ring, nr_frags + 1))) { nn_dp_warn(dp, "TX ring %d busy. 
wrp=%u rdp=%u\n", qidx, tx_ring->wr_p, tx_ring->rd_p); + nd_q = netdev_get_tx_queue(dp->netdev, qidx); netif_tx_stop_queue(nd_q); nfp_net_tx_xmit_more_flush(tx_ring); u64_stats_update_begin(&r_vec->tx_sync); @@ -851,7 +860,7 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) txd->lso_hdrlen = 0; /* Do not reorder - tso may adjust pkt cnt, vlan may override fields */ - nfp_net_tx_tso(r_vec, txbuf, txd, skb); + nfp_net_tx_tso(r_vec, txbuf, txd, skb, md_bytes); nfp_net_tx_csum(dp, r_vec, txbuf, txd, skb); if (skb_vlan_tag_present(skb) && dp->ctrl & NFP_NET_CFG_CTRL_TXVLAN) { txd->flags |= PCIE_DESC_TX_VLAN; @@ -860,8 +869,10 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) /* Gather DMA */ if (nr_frags > 0) { + __le64 second_half; + /* all descs must match except for in addr, length and eop */ - txdg = *txd; + second_half = txd->vals8[1]; for (f = 0; f < nr_frags; f++) { frag = &skb_shinfo(skb)->frags[f]; @@ -878,11 +889,11 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) tx_ring->txbufs[wr_idx].fidx = f; txd = &tx_ring->txds[wr_idx]; - *txd = txdg; txd->dma_len = cpu_to_le16(fsize); nfp_desc_set_dma_addr(txd, dma_addr); - txd->offset_eop |= - (f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0; + txd->offset_eop = md_bytes | + ((f == nr_frags - 1) ? PCIE_DESC_TX_EOP : 0); + txd->vals8[1] = second_half; } u64_stats_update_begin(&r_vec->tx_sync); @@ -890,16 +901,16 @@ static int nfp_net_tx(struct sk_buff *skb, struct net_device *netdev) u64_stats_update_end(&r_vec->tx_sync); } - netdev_tx_sent_queue(nd_q, txbuf->real_len); - skb_tx_timestamp(skb); + nd_q = netdev_get_tx_queue(dp->netdev, tx_ring->idx); + tx_ring->wr_p += nr_frags + 1; if (nfp_net_tx_ring_should_stop(tx_ring)) nfp_net_tx_ring_stop(nd_q, tx_ring); tx_ring->wr_ptr_add += nr_frags + 1; - if (!skb->xmit_more || netif_xmit_stopped(nd_q)) + if (__netdev_tx_sent_queue(nd_q, txbuf->real_len, skb->xmit_more)) nfp_net_tx_xmit_more_flush(tx_ring); return NETDEV_TX_OK; @@ -940,14 +951,10 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) { struct nfp_net_r_vector *r_vec = tx_ring->r_vec; struct nfp_net_dp *dp = &r_vec->nfp_net->dp; - const struct skb_frag_struct *frag; struct netdev_queue *nd_q; u32 done_pkts = 0, done_bytes = 0; - struct sk_buff *skb; - int todo, nr_frags; u32 qcp_rd_p; - int fidx; - int idx; + int todo; if (tx_ring->wr_p == tx_ring->rd_p) return; @@ -961,26 +968,33 @@ static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) todo = D_IDX(tx_ring, qcp_rd_p - tx_ring->qcp_rd_p); while (todo--) { + const struct skb_frag_struct *frag; + struct nfp_net_tx_buf *tx_buf; + struct sk_buff *skb; + int fidx, nr_frags; + int idx; + idx = D_IDX(tx_ring, tx_ring->rd_p++); + tx_buf = &tx_ring->txbufs[idx]; - skb = tx_ring->txbufs[idx].skb; + skb = tx_buf->skb; if (!skb) continue; nr_frags = skb_shinfo(skb)->nr_frags; - fidx = tx_ring->txbufs[idx].fidx; + fidx = tx_buf->fidx; if (fidx == -1) { /* unmap head */ - dma_unmap_single(dp->dev, tx_ring->txbufs[idx].dma_addr, + dma_unmap_single(dp->dev, tx_buf->dma_addr, skb_headlen(skb), DMA_TO_DEVICE); - done_pkts += tx_ring->txbufs[idx].pkt_cnt; - done_bytes += tx_ring->txbufs[idx].real_len; + done_pkts += tx_buf->pkt_cnt; + done_bytes += tx_buf->real_len; } else { /* unmap fragment */ frag = &skb_shinfo(skb)->frags[fidx]; - dma_unmap_page(dp->dev, tx_ring->txbufs[idx].dma_addr, + dma_unmap_page(dp->dev, tx_buf->dma_addr, skb_frag_size(frag), DMA_TO_DEVICE); } @@ -988,9 +1002,9 @@ 
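
[The nfp_net_tx() hunk just above stops copying a whole template descriptor per fragment and, at the tail, replaces netdev_tx_sent_queue() plus an open-coded skb->xmit_more test with __netdev_tx_sent_queue(), which folds BQL accounting into the doorbell decision. A minimal sketch of that 4.20-era idiom — the toy_* names are placeholders, not driver code:

#include <linux/netdevice.h>

static void toy_ring_doorbell(struct net_device *dev) { /* kick HW */ }

static netdev_tx_t toy_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_queue *nd_q =
		netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

	/* ... queue descriptors for skb here ... */

	/* Returns true when the doorbell must ring now: either the stack
	 * gave no xmit_more hint, or BQL just stopped the queue (a stopped
	 * queue must never be left holding unflushed descriptors).
	 */
	if (__netdev_tx_sent_queue(nd_q, skb->len, skb->xmit_more))
		toy_ring_doorbell(dev);

	return NETDEV_TX_OK;
}]
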
static void nfp_net_tx_complete(struct nfp_net_tx_ring *tx_ring, int budget) if (fidx == nr_frags - 1) napi_consume_skb(skb, budget); - tx_ring->txbufs[idx].dma_addr = 0; - tx_ring->txbufs[idx].skb = NULL; - tx_ring->txbufs[idx].fidx = -2; + tx_buf->dma_addr = 0; + tx_buf->skb = NULL; + tx_buf->fidx = -2; } tx_ring->qcp_rd_p = qcp_rd_p; @@ -3275,7 +3289,10 @@ nfp_net_features_check(struct sk_buff *skb, struct net_device *dev, hdrlen = skb_inner_transport_header(skb) - skb->data + inner_tcp_hdrlen(skb); - if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ)) + /* Assume worst case scenario of having longest possible + * metadata prepend - 8B + */ + if (unlikely(hdrlen > NFP_NET_LSO_MAX_HDR_SZ - 8)) features &= ~NETIF_F_GSO_MASK; } @@ -3560,6 +3577,7 @@ void nfp_net_info(struct nfp_net *nn) /** * nfp_net_alloc() - Allocate netdev and related structure * @pdev: PCI device + * @ctrl_bar: PCI IOMEM with vNIC config memory * @needs_netdev: Whether to allocate a netdev for this vNIC * @max_tx_rings: Maximum number of TX rings supported by device * @max_rx_rings: Maximum number of RX rings supported by device @@ -3570,11 +3588,12 @@ void nfp_net_info(struct nfp_net *nn) * * Return: NFP Net device structure, or ERR_PTR on error. */ -struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, - unsigned int max_tx_rings, - unsigned int max_rx_rings) +struct nfp_net * +nfp_net_alloc(struct pci_dev *pdev, void __iomem *ctrl_bar, bool needs_netdev, + unsigned int max_tx_rings, unsigned int max_rx_rings) { struct nfp_net *nn; + int err; if (needs_netdev) { struct net_device *netdev; @@ -3594,6 +3613,7 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, } nn->dp.dev = &pdev->dev; + nn->dp.ctrl_bar = ctrl_bar; nn->pdev = pdev; nn->max_tx_rings = max_tx_rings; @@ -3616,7 +3636,19 @@ struct nfp_net *nfp_net_alloc(struct pci_dev *pdev, bool needs_netdev, timer_setup(&nn->reconfig_timer, nfp_net_reconfig_timer, 0); + err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, + &nn->tlv_caps); + if (err) + goto err_free_nn; + return nn; + +err_free_nn: + if (nn->dp.netdev) + free_netdev(nn->dp.netdev); + else + vfree(nn); + return ERR_PTR(err); } /** @@ -3889,11 +3921,6 @@ int nfp_net_init(struct nfp_net *nn) nn->dp.ctrl |= NFP_NET_CFG_CTRL_IRQMOD; } - err = nfp_net_tlv_caps_parse(&nn->pdev->dev, nn->dp.ctrl_bar, - &nn->tlv_caps); - if (err) - return err; - if (nn->dp.netdev) nfp_net_netdev_init(nn); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c index f2aaef976c7d..6d5213b5bcb0 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.c @@ -41,8 +41,8 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, data += 4; if (length % NFP_NET_CFG_TLV_LENGTH_INC) { - dev_err(dev, "TLV size not multiple of %u len:%u\n", - NFP_NET_CFG_TLV_LENGTH_INC, length); + dev_err(dev, "TLV size not multiple of %u offset:%u len:%u\n", + NFP_NET_CFG_TLV_LENGTH_INC, offset, length); return -EINVAL; } if (data + length > end) { @@ -61,14 +61,14 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, if (!length) return 0; - dev_err(dev, "END TLV should be empty, has len:%d\n", - length); + dev_err(dev, "END TLV should be empty, has offset:%u len:%d\n", + offset, length); return -EINVAL; case NFP_NET_CFG_TLV_TYPE_ME_FREQ: if (length != 4) { dev_err(dev, - "ME FREQ TLV should be 4B, is %dB\n", - length); + "ME FREQ TLV should be 4B, is %dB 
offset:%u\n", + length, offset); return -EINVAL; } @@ -90,6 +90,15 @@ int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, FIELD_GET(NFP_NET_CFG_TLV_HEADER_TYPE, hdr), offset, length); break; + case NFP_NET_CFG_TLV_TYPE_REPR_CAP: + if (length < 4) { + dev_err(dev, "REPR CAP TLV short %dB < 4B offset:%u\n", + length, offset); + return -EINVAL; + } + + caps->repr_cap = readl(data); + break; default: if (!FIELD_GET(NFP_NET_CFG_TLV_HEADER_REQUIRED, hdr)) break; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h index d7c8518ac952..166d7f71442e 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ctrl.h @@ -397,6 +397,8 @@ #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_ADD 1 #define NFP_NET_CFG_MBOX_CMD_CTAG_FILTER_KILL 2 +#define NFP_NET_CFG_MBOX_CMD_PCI_DSCP_PRIOMAP_SET 5 + /** * VLAN filtering using general use mailbox * %NFP_NET_CFG_VLAN_FILTER: Base address of VLAN filter mailbox @@ -464,6 +466,10 @@ * Variable, experimental IDs. IDs designated for internal development and * experiments before a stable TLV ID has been allocated to a feature. Should * never be present in production firmware. + * + * %NFP_NET_CFG_TLV_TYPE_REPR_CAP: + * Single word, equivalent of %NFP_NET_CFG_CAP for representors, features which + * can be used on representors. */ #define NFP_NET_CFG_TLV_TYPE_UNKNOWN 0 #define NFP_NET_CFG_TLV_TYPE_RESERVED 1 @@ -472,6 +478,7 @@ #define NFP_NET_CFG_TLV_TYPE_MBOX 4 #define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL0 5 #define NFP_NET_CFG_TLV_TYPE_EXPERIMENTAL1 6 +#define NFP_NET_CFG_TLV_TYPE_REPR_CAP 7 struct device; @@ -480,11 +487,13 @@ struct device; * @me_freq_mhz: ME clock_freq (MHz) * @mbox_off: vNIC mailbox area offset * @mbox_len: vNIC mailbox area length + * @repr_cap: capabilities for representors */ struct nfp_net_tlv_caps { u32 me_freq_mhz; unsigned int mbox_off; unsigned int mbox_len; + u32 repr_cap; }; int nfp_net_tlv_caps_parse(struct device *dev, u8 __iomem *ctrl_mem, diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c index 69b1c9b62e3d..ab7f2498e1c4 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_debugfs.c @@ -8,7 +8,7 @@ static struct dentry *nfp_dir; -static int nfp_net_debugfs_rx_q_read(struct seq_file *file, void *data) +static int nfp_rx_q_show(struct seq_file *file, void *data) { struct nfp_net_r_vector *r_vec = file->private; struct nfp_net_rx_ring *rx_ring; @@ -65,31 +65,12 @@ out: rtnl_unlock(); return 0; } +DEFINE_SHOW_ATTRIBUTE(nfp_rx_q); -static int nfp_net_debugfs_rx_q_open(struct inode *inode, struct file *f) -{ - return single_open(f, nfp_net_debugfs_rx_q_read, inode->i_private); -} +static int nfp_tx_q_show(struct seq_file *file, void *data); +DEFINE_SHOW_ATTRIBUTE(nfp_tx_q); -static const struct file_operations nfp_rx_q_fops = { - .owner = THIS_MODULE, - .open = nfp_net_debugfs_rx_q_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek -}; - -static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f); - -static const struct file_operations nfp_tx_q_fops = { - .owner = THIS_MODULE, - .open = nfp_net_debugfs_tx_q_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek -}; - -static int nfp_net_debugfs_tx_q_read(struct seq_file *file, void *data) +static int nfp_tx_q_show(struct seq_file *file, void *data) { struct nfp_net_r_vector 
*r_vec = file->private; struct nfp_net_tx_ring *tx_ring; @@ -158,18 +139,11 @@ out: return 0; } -static int nfp_net_debugfs_tx_q_open(struct inode *inode, struct file *f) +static int nfp_xdp_q_show(struct seq_file *file, void *data) { - return single_open(f, nfp_net_debugfs_tx_q_read, inode->i_private); + return nfp_tx_q_show(file, data); } - -static const struct file_operations nfp_xdp_q_fops = { - .owner = THIS_MODULE, - .open = nfp_net_debugfs_tx_q_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek -}; +DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q); void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) { diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c index 1e7d20468a34..08f5fdbd8e41 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c @@ -116,13 +116,13 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev, n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS); /* Allocate and initialise the vNIC */ - nn = nfp_net_alloc(pf->pdev, needs_netdev, n_tx_rings, n_rx_rings); + nn = nfp_net_alloc(pf->pdev, ctrl_bar, needs_netdev, + n_tx_rings, n_rx_rings); if (IS_ERR(nn)) return nn; nn->app = pf->app; nfp_net_get_fw_version(&nn->fw_ver, ctrl_bar); - nn->dp.ctrl_bar = ctrl_bar; nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ; nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ; nn->dp.is_vf = 0; diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c index c09b893c30dd..69d7aebda09b 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.c @@ -11,6 +11,7 @@ #include "nfpcore/nfp_nsp.h" #include "nfp_app.h" #include "nfp_main.h" +#include "nfp_net.h" #include "nfp_net_ctrl.h" #include "nfp_net_repr.h" #include "nfp_net_sriov.h" @@ -231,6 +232,27 @@ err_port_disable: return err; } +static netdev_features_t +nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features) +{ + struct nfp_repr *repr = netdev_priv(netdev); + netdev_features_t old_features = features; + netdev_features_t lower_features; + struct net_device *lower_dev; + + lower_dev = repr->dst->u.port_info.lower_dev; + + lower_features = lower_dev->features; + if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) + lower_features |= NETIF_F_HW_CSUM; + + features = netdev_intersect_features(features, lower_features); + features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC); + features |= NETIF_F_LLTX; + + return features; +} + const struct net_device_ops nfp_repr_netdev_ops = { .ndo_init = nfp_app_ndo_init, .ndo_uninit = nfp_app_ndo_uninit, @@ -248,10 +270,25 @@ const struct net_device_ops nfp_repr_netdev_ops = { .ndo_set_vf_spoofchk = nfp_app_set_vf_spoofchk, .ndo_get_vf_config = nfp_app_get_vf_config, .ndo_set_vf_link_state = nfp_app_set_vf_link_state, + .ndo_fix_features = nfp_repr_fix_features, .ndo_set_features = nfp_port_set_features, .ndo_set_mac_address = eth_mac_addr, }; +void +nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower) +{ + struct nfp_repr *repr = netdev_priv(netdev); + + if (repr->dst->u.port_info.lower_dev != lower) + return; + + netdev->gso_max_size = lower->gso_max_size; + netdev->gso_max_segs = lower->gso_max_segs; + + netdev_update_features(netdev); +} + static void nfp_repr_clean(struct nfp_repr *repr) { unregister_netdev(repr->netdev); @@ -281,6 +318,8 @@ 
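
[The debugfs conversion above relies on DEFINE_SHOW_ATTRIBUTE() to generate the open handler and file_operations that were previously spelled out by hand. Paraphrasing include/linux/seq_file.h, DEFINE_SHOW_ATTRIBUTE(nfp_rx_q) expands to approximately:

/* given: static int nfp_rx_q_show(struct seq_file *file, void *data); */

static int nfp_rx_q_open(struct inode *inode, struct file *file)
{
	/* single_open() binds the ->show() callback to inode->i_private */
	return single_open(file, nfp_rx_q_show, inode->i_private);
}

static const struct file_operations nfp_rx_q_fops = {
	.owner		= THIS_MODULE,
	.open		= nfp_rx_q_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

which is why the three hand-rolled *_open()/*_fops blocks can simply be deleted.]
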
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, struct net_device *pf_netdev) { struct nfp_repr *repr = netdev_priv(netdev); + struct nfp_net *nn = netdev_priv(pf_netdev); + u32 repr_cap = nn->tlv_caps.repr_cap; int err; nfp_repr_set_lockdep_class(netdev); @@ -299,6 +338,55 @@ int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, SWITCHDEV_SET_OPS(netdev, &nfp_port_switchdev_ops); + /* Set features the lower device can support with representors */ + if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR) + netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE; + + netdev->hw_features = NETIF_F_HIGHDMA; + if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY) + netdev->hw_features |= NETIF_F_RXCSUM; + if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM) + netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; + if (repr_cap & NFP_NET_CFG_CTRL_GATHER) + netdev->hw_features |= NETIF_F_SG; + if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) || + repr_cap & NFP_NET_CFG_CTRL_LSO2) + netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; + if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY) + netdev->hw_features |= NETIF_F_RXHASH; + if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) { + if (repr_cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL; + } + if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) { + if (repr_cap & NFP_NET_CFG_CTRL_LSO) + netdev->hw_features |= NETIF_F_GSO_GRE; + } + if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE)) + netdev->hw_enc_features = netdev->hw_features; + + netdev->vlan_features = netdev->hw_features; + + if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) { + if (repr_cap & NFP_NET_CFG_CTRL_LSO2) + netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n"); + else + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX; + } + if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER) + netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER; + + netdev->features = netdev->hw_features; + + /* Advertise but disable TSO by default. 
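+ * Keeping TSO in hw_features while clearing it from features leaves the offload off until the user opts in (e.g. via ethtool -K).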
*/ + netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); + netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS; + + netdev->priv_flags |= IFF_NO_QUEUE; + netdev->features |= NETIF_F_LLTX; + if (nfp_app_has_tc(app)) { netdev->features |= NETIF_F_HW_TC; netdev->hw_features |= NETIF_F_HW_TC; @@ -442,7 +530,9 @@ int nfp_reprs_resync_phys_ports(struct nfp_app *app) continue; nfp_app_repr_preclean(app, netdev); + rtnl_lock(); rcu_assign_pointer(reprs->reprs[i], NULL); + rtnl_unlock(); synchronize_rcu(); nfp_repr_clean(repr); } diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h index c412b94bfb97..e0f13dfe1f39 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_repr.h @@ -92,6 +92,8 @@ nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id); void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len); +void +nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower); int nfp_repr_init(struct nfp_app *app, struct net_device *netdev, u32 cmsg_port_id, struct nfp_port *port, struct net_device *pf_netdev); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c index d2c1e9ea5668..1145849ca7ba 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_netvf_main.c @@ -172,7 +172,7 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, rx_bar_off = NFP_PCIE_QUEUE(startq); /* Allocate and initialise the netdev */ - nn = nfp_net_alloc(pdev, true, max_tx_rings, max_rx_rings); + nn = nfp_net_alloc(pdev, ctrl_bar, true, max_tx_rings, max_rx_rings); if (IS_ERR(nn)) { err = PTR_ERR(nn); goto err_ctrl_unmap; @@ -180,7 +180,6 @@ static int nfp_netvf_pci_probe(struct pci_dev *pdev, vf->nn = nn; nn->fw_ver = fw_ver; - nn->dp.ctrl_bar = ctrl_bar; nn->dp.is_vf = 1; nn->stride_tx = stride; nn->stride_rx = stride; diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c index 25382f8fbb70..89d17399fb5a 100644 --- a/drivers/net/ethernet/nxp/lpc_eth.c +++ b/drivers/net/ethernet/nxp/lpc_eth.c @@ -280,7 +280,7 @@ #define LPC_FCCR_MIRRORCOUNTERCURRENT(n) ((n) & 0xFFFF) /* - * rxfliterctrl, rxfilterwolstatus, and rxfilterwolclear shared + * rxfilterctrl, rxfilterwolstatus, and rxfilterwolclear shared * register definitions */ #define LPC_RXFLTRW_ACCEPTUNICAST (1 << 0) @@ -291,7 +291,7 @@ #define LPC_RXFLTRW_ACCEPTPERFECT (1 << 5) /* - * rxfliterctrl register definitions + * rxfilterctrl register definitions */ #define LPC_RXFLTRWSTS_MAGICPACKETENWOL (1 << 12) #define LPC_RXFLTRWSTS_RXFILTERENWOL (1 << 13) @@ -783,8 +783,6 @@ static int lpc_mii_probe(struct net_device *ndev) phy_set_max_speed(phydev, SPEED_100); - phydev->advertising = phydev->supported; - pldat->link = 0; pldat->speed = 0; pldat->duplex = -1; diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d9a03aba0e02..24a90163775e 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -296,6 +296,12 @@ enum qed_wol_support { QED_WOL_SUPPORT_PME, }; +enum qed_db_rec_exec { + DB_REC_DRY_RUN, + DB_REC_REAL_DEAL, + DB_REC_ONCE, +}; + struct qed_hw_info { /* PCI personality */ enum qed_pci_personality personality; @@ -425,6 +431,14 @@ struct qed_qm_info { u8 num_pf_rls; }; +struct qed_db_recovery_info { + struct list_head list; + + /* Lock to protect the doorbell recovery 
mechanism list */ + spinlock_t lock; + u32 db_recovery_counter; +}; + struct storm_stats { u32 address; u32 len; @@ -522,6 +536,7 @@ struct qed_simd_fp_handler { enum qed_slowpath_wq_flag { QED_SLOWPATH_MFW_TLV_REQ, + QED_SLOWPATH_PERIODIC_DB_REC, }; struct qed_hwfn { @@ -640,6 +655,9 @@ struct qed_hwfn { /* L2-related */ struct qed_l2_info *p_l2_info; + /* Mechanism for recovering from doorbell drop */ + struct qed_db_recovery_info db_recovery_info; + /* Nvm images number and attributes */ struct qed_nvm_image_info nvm_info; @@ -652,11 +670,12 @@ struct qed_hwfn { struct delayed_work iov_task; unsigned long iov_task_flags; #endif - - struct z_stream_s *stream; + struct z_stream_s *stream; + bool slowpath_wq_active; struct workqueue_struct *slowpath_wq; struct delayed_work slowpath_task; unsigned long slowpath_task_flags; + u32 periodic_db_rec_count; }; struct pci_params { @@ -897,6 +916,12 @@ u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc); #define QED_LEADING_HWFN(dev) (&dev->hwfns[0]) +/* doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn); +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, + enum qed_db_rec_exec db_exec); +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn); + /* Other Linux specific common definitions */ #define DP_NAME(cdev) ((cdev)->name) @@ -931,4 +956,6 @@ int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, union qed_mfw_tlv_data *tlv_data); void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc); + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn); #endif /* _QED_H */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index 88a8576ca9ce..8f6551421945 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -66,6 +66,318 @@ static DEFINE_SPINLOCK(qm_lock); +/******************** Doorbell Recovery *******************/ +/* The doorbell recovery mechanism consists of a list of entries which represent + * doorbelling entities (l2 queues, roce sq/rq/cqs, the slowpath spq, etc). Each + * entity needs to register with the mechanism and provide the parameters + * describing its doorbell, including a location where last used doorbell data + * can be found. The doorbell execute function will traverse the list and + * doorbell all of the registered entries. + */ +struct qed_db_recovery_entry { + struct list_head list_entry; + void __iomem *db_addr; + void *db_data; + enum qed_db_rec_width db_width; + enum qed_db_rec_space db_space; + u8 hwfn_idx; +}; + +/* Display a single doorbell recovery entry */ +static void qed_db_recovery_dp_entry(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry, + char *action) +{ + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "(%s: db_entry %p, addr %p, data %p, width %s, %s space, hwfn %d)\n", + action, + db_entry, + db_entry->db_addr, + db_entry->db_data, + db_entry->db_width == DB_REC_WIDTH_32B ? "32b" : "64b", + db_entry->db_space == DB_REC_USER ? "user" : "kernel", + db_entry->hwfn_idx); +} + +/* Doorbell address sanity (address within doorbell bar range) */ +static bool qed_db_rec_sanity(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data) +{ + /* Make sure doorbell address is within the doorbell bar */ + if (db_addr < cdev->doorbells || + (u8 __iomem *)db_addr > + (u8 __iomem *)cdev->doorbells + cdev->db_size) { + WARN(true, + "Illegal doorbell address: %p. 
Legal range for doorbell addresses is [%p..%p]\n", + db_addr, + cdev->doorbells, + (u8 __iomem *)cdev->doorbells + cdev->db_size); + return false; + } + + /* Make sure doorbell data pointer is not null */ + if (!db_data) { + WARN(true, "Illegal doorbell data pointer: %p", db_data); + return false; + } + + return true; +} + +/* Find hwfn according to the doorbell address */ +static struct qed_hwfn *qed_db_rec_find_hwfn(struct qed_dev *cdev, + void __iomem *db_addr) +{ + struct qed_hwfn *p_hwfn; + + /* In CMT doorbell bar is split down the middle between engine 0 and engine 1 */ + if (cdev->num_hwfns > 1) + p_hwfn = db_addr < cdev->hwfns[1].doorbells ? + &cdev->hwfns[0] : &cdev->hwfns[1]; + else + p_hwfn = QED_LEADING_HWFN(cdev); + + return p_hwfn; +} + +/* Add a new entry to the doorbell recovery mechanism */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space) +{ + struct qed_db_recovery_entry *db_entry; + struct qed_hwfn *p_hwfn; + + /* Short-circuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Sanitize doorbell address */ + if (!qed_db_rec_sanity(cdev, db_addr, db_data)) + return -EINVAL; + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Create entry */ + db_entry = kzalloc(sizeof(*db_entry), GFP_KERNEL); + if (!db_entry) { + DP_NOTICE(cdev, "Failed to allocate a db recovery entry\n"); + return -ENOMEM; + } + + /* Populate entry */ + db_entry->db_addr = db_addr; + db_entry->db_data = db_data; + db_entry->db_width = db_width; + db_entry->db_space = db_space; + db_entry->hwfn_idx = p_hwfn->my_id; + + /* Display */ + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Adding"); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_add_tail(&db_entry->list_entry, &p_hwfn->db_recovery_info.list); + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + return 0; +} + +/* Remove an entry from the doorbell recovery mechanism */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data) +{ + struct qed_db_recovery_entry *db_entry = NULL; + struct qed_hwfn *p_hwfn; + int rc = -EINVAL; + + /* Short-circuit VFs, for now */ + if (IS_VF(cdev)) { + DP_VERBOSE(cdev, + QED_MSG_IOV, "db recovery - skipping VF doorbell\n"); + return 0; + } + + /* Sanitize doorbell address */ + if (!qed_db_rec_sanity(cdev, db_addr, db_data)) + return -EINVAL; + + /* Obtain hwfn from doorbell address */ + p_hwfn = qed_db_rec_find_hwfn(cdev, db_addr); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + /* search according to db_data addr since db_addr is not unique (roce) */ + if (db_entry->db_data == db_data) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Deleting"); + list_del(&db_entry->list_entry); + rc = 0; + break; + } + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); + + if (rc == -EINVAL) + + DP_NOTICE(p_hwfn, + "Failed to find element in list. Key (db_data addr) was %p. 
db_addr was %p\n", + db_data, db_addr); + else + kfree(db_entry); + + return rc; +} + +/* Initialize the doorbell recovery mechanism */ +static int qed_db_recovery_setup(struct qed_hwfn *p_hwfn) +{ + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Setting up db recovery\n"); + + /* Make sure db_size was set in cdev */ + if (!p_hwfn->cdev->db_size) { + DP_ERR(p_hwfn->cdev, "db_size not set\n"); + return -EINVAL; + } + + INIT_LIST_HEAD(&p_hwfn->db_recovery_info.list); + spin_lock_init(&p_hwfn->db_recovery_info.lock); + p_hwfn->db_recovery_info.db_recovery_counter = 0; + + return 0; +} + +/* Destroy the doorbell recovery mechanism */ +static void qed_db_recovery_teardown(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Tearing down db recovery\n"); + if (!list_empty(&p_hwfn->db_recovery_info.list)) { + DP_VERBOSE(p_hwfn, + QED_MSG_SPQ, + "Doorbell Recovery teardown found the doorbell recovery list was not empty (Expected in disorderly driver unload (e.g. recovery) otherwise this probably means some flow forgot to db_recovery_del). Prepare to purge doorbell recovery list...\n"); + while (!list_empty(&p_hwfn->db_recovery_info.list)) { + db_entry = + list_first_entry(&p_hwfn->db_recovery_info.list, + struct qed_db_recovery_entry, + list_entry); + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Purging"); + list_del(&db_entry->list_entry); + kfree(db_entry); + } + } + p_hwfn->db_recovery_info.db_recovery_counter = 0; +} + +/* Print the content of the doorbell recovery mechanism */ +void qed_db_recovery_dp(struct qed_hwfn *p_hwfn) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + DP_NOTICE(p_hwfn, + "Displaying doorbell recovery database. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + qed_db_recovery_dp_entry(p_hwfn, db_entry, "Printing"); + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/* Ring the doorbell of a single doorbell recovery entry */ +static void qed_db_recovery_ring(struct qed_hwfn *p_hwfn, + struct qed_db_recovery_entry *db_entry, + enum qed_db_rec_exec db_exec) +{ + if (db_exec != DB_REC_ONCE) { + /* Print according to width */ + if (db_entry->db_width == DB_REC_WIDTH_32B) { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "%s doorbell address %p data %x\n", + db_exec == DB_REC_DRY_RUN ? + "would have rung" : "ringing", + db_entry->db_addr, + *(u32 *)db_entry->db_data); + } else { + DP_VERBOSE(p_hwfn, QED_MSG_SPQ, + "%s doorbell address %p data %llx\n", + db_exec == DB_REC_DRY_RUN ? + "would have rung" : "ringing", + db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + } + + /* Sanity */ + if (!qed_db_rec_sanity(p_hwfn->cdev, db_entry->db_addr, + db_entry->db_data)) + return; + + /* Flush the write combined buffer. Since there are multiple doorbelling + * entities using the same address, if we don't flush, a transaction + * could be lost. + */ + wmb(); + + /* Ring the doorbell */ + if (db_exec == DB_REC_REAL_DEAL || db_exec == DB_REC_ONCE) { + if (db_entry->db_width == DB_REC_WIDTH_32B) + DIRECT_REG_WR(db_entry->db_addr, + *(u32 *)(db_entry->db_data)); + else + DIRECT_REG_WR64(db_entry->db_addr, + *(u64 *)(db_entry->db_data)); + } + + /* Flush the write combined buffer. Next doorbell may come from a + * different entity to the same address... 
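+ * so flush again, otherwise the two back-to-back transactions could be merged in the write combined buffer and a doorbell lost.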
+ */ + wmb(); +} + +/* Traverse the doorbell recovery entry list and ring all the doorbells */ +void qed_db_recovery_execute(struct qed_hwfn *p_hwfn, + enum qed_db_rec_exec db_exec) +{ + struct qed_db_recovery_entry *db_entry = NULL; + + if (db_exec != DB_REC_ONCE) { + DP_NOTICE(p_hwfn, + "Executing doorbell recovery. Counter was %d\n", + p_hwfn->db_recovery_info.db_recovery_counter); + + /* Track number of times recovery was executed */ + p_hwfn->db_recovery_info.db_recovery_counter++; + } + + /* Protect the list */ + spin_lock_bh(&p_hwfn->db_recovery_info.lock); + list_for_each_entry(db_entry, + &p_hwfn->db_recovery_info.list, list_entry) { + qed_db_recovery_ring(p_hwfn, db_entry, db_exec); + if (db_exec == DB_REC_ONCE) + break; + } + + spin_unlock_bh(&p_hwfn->db_recovery_info.lock); +} + +/******************** Doorbell Recovery end ****************/ + #define QED_MIN_DPIS (4) #define QED_MIN_PWM_REGION (QED_WID_SIZE * QED_MIN_DPIS) @@ -194,6 +506,9 @@ void qed_resc_free(struct qed_dev *cdev) qed_dmae_info_free(p_hwfn); qed_dcbx_info_free(p_hwfn); qed_dbg_user_data_free(p_hwfn); + + /* Destroy doorbell recovery mechanism */ + qed_db_recovery_teardown(p_hwfn); } } @@ -969,6 +1284,11 @@ int qed_resc_alloc(struct qed_dev *cdev) struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; u32 n_eqes, num_cons; + /* Initialize the doorbell recovery mechanism */ + rc = qed_db_recovery_setup(p_hwfn); + if (rc) + goto alloc_err; + /* First allocate the context manager structure */ rc = qed_cxt_mngr_alloc(p_hwfn); if (rc) @@ -1468,6 +1788,14 @@ enum QED_ROCE_EDPM_MODE { QED_ROCE_EDPM_MODE_DISABLE = 2, }; +bool qed_edpm_enabled(struct qed_hwfn *p_hwfn) +{ + if (p_hwfn->dcbx_no_edpm || p_hwfn->db_bar_no_edpm) + return false; + + return true; +} + static int qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) { @@ -1537,13 +1865,13 @@ qed_hw_init_pf_doorbell_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->wid_count = (u16) n_cpus; DP_INFO(p_hwfn, - "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s\n", + "doorbell bar: normal_region_size=%d, pwm_region_size=%d, dpi_size=%d, dpi_count=%d, roce_edpm=%s, page_size=%lu\n", norm_regsize, pwm_regsize, p_hwfn->dpi_size, p_hwfn->dpi_count, - ((p_hwfn->dcbx_no_edpm) || (p_hwfn->db_bar_no_edpm)) ? - "disabled" : "enabled"); + (!qed_edpm_enabled(p_hwfn)) ? + "disabled" : "enabled", PAGE_SIZE); if (rc) { DP_ERR(p_hwfn, diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index defdda1ffaa2..acccd85170aa 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -472,6 +472,34 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); +/** + * @brief db_recovery_add - add doorbell information to the doorbell + * recovery mechanism. + * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address of where db_data is stored + * @param db_width - doorbell is 32b or 64b + * @param db_space - doorbell recovery addresses are user or kernel space + */ +int qed_db_recovery_add(struct qed_dev *cdev, + void __iomem *db_addr, + void *db_data, + enum qed_db_rec_width db_width, + enum qed_db_rec_space db_space); + +/** + * @brief db_recovery_del - remove doorbell information from the doorbell + * recovery mechanism. db_data serves as key (db_addr is not unique). 
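+ * Several doorbelling entities (e.g. roce queues) can share one doorbell address, so only the db_data pointer identifies an entry unambiguously.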
+ * + * @param cdev + * @param db_addr - doorbell address + * @param db_data - address where db_data is stored. Serves as key for the + * entry to delete. + */ +int qed_db_recovery_del(struct qed_dev *cdev, + void __iomem *db_addr, void *db_data); + const char *qed_hw_get_resc_name(enum qed_resources res_id); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index b38e12c9de9d..b13cfb449d8f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -12655,6 +12655,7 @@ struct public_drv_mb { #define DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x000000FF #define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI 0x3 #define DRV_MB_PARAM_NVM_LEN_OFFSET 24 #define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 @@ -12814,6 +12815,11 @@ struct public_drv_mb { union drv_union_data union_data; }; +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_MASK 0x00ffffff +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET_SHIFT 0 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_MASK 0xff000000 +#define FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE_SHIFT 24 + enum MFW_DRV_MSG_TYPE { MFW_DRV_MSG_LINK_CHANGE, MFW_DRV_MSG_FLR_FW_ACK_FAILED, diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.c b/drivers/net/ethernet/qlogic/qed/qed_int.c index b22f464ea3fa..92340919d852 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.c +++ b/drivers/net/ethernet/qlogic/qed/qed_int.c @@ -361,29 +361,147 @@ static int qed_pglub_rbc_attn_cb(struct qed_hwfn *p_hwfn) return 0; } -#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) -#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) -#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) -#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) +#define QED_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define QED_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define QED_DORQ_ATTENTION_OPAQUE_SHIFT (0x0) +#define QED_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define QED_DORQ_ATTENTION_SIZE_SHIFT (16) + +#define QED_DB_REC_COUNT 1000 +#define QED_DB_REC_INTERVAL 100 + +static int qed_db_rec_flush_queue(struct qed_hwfn *p_hwfn, + struct qed_ptt *p_ptt) +{ + u32 count = QED_DB_REC_COUNT; + u32 usage = 1; + + /* wait for usage to zero or count to run out. This is necessary since + * EDPM doorbell transactions can take multiple 64b cycles, and as such + * can "split" over the pci. Possibly, the doorbell drop can happen with + * half an EDPM in the queue and other half dropped. Another EDPM + * doorbell to the same address (from doorbell recovery mechanism or + * from the doorbelling entity) could have first half dropped and second + * half interpreted as continuation of the first. To prevent such + * malformed doorbells from reaching the device, flush the queue before + * releasing the overflow sticky indication. + */ + while (count-- && usage) { + usage = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_USAGE_CNT); + udelay(QED_DB_REC_INTERVAL); + } + + /* should have been depleted by now */ + if (usage) { + DP_NOTICE(p_hwfn->cdev, + "DB recovery: doorbell usage failed to zero after %d usec. 
usage was %x\n", + QED_DB_REC_INTERVAL * QED_DB_REC_COUNT, usage); + return -EBUSY; + } + + return 0; +} + +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) +{ + u32 overflow; + int rc; + + overflow = qed_rd(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY); + DP_NOTICE(p_hwfn, "PF Overflow sticky 0x%x\n", overflow); + if (!overflow) { + qed_db_recovery_execute(p_hwfn, DB_REC_ONCE); + return 0; + } + + if (qed_edpm_enabled(p_hwfn)) { + rc = qed_db_rec_flush_queue(p_hwfn, p_ptt); + if (rc) + return rc; + } + + /* Flush any pending (e)dpm as they may never arrive */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DPM_FORCE_ABORT, 0x1); + + /* Release overflow sticky indication (stop silently dropping everything) */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_OVFL_STICKY, 0x0); + + /* Repeat all last doorbells (doorbell drop recovery) */ + qed_db_recovery_execute(p_hwfn, DB_REC_REAL_DEAL); + + return 0; +} + static int qed_dorq_attn_cb(struct qed_hwfn *p_hwfn) { - u32 reason; + u32 int_sts, first_drop_reason, details, address, all_drops_reason; + struct qed_ptt *p_ptt = p_hwfn->p_dpc_ptt; + int rc; - reason = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & - QED_DORQ_ATTENTION_REASON_MASK; - if (reason) { - u32 details = qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - DORQ_REG_DB_DROP_DETAILS); + int_sts = qed_rd(p_hwfn, p_ptt, DORQ_REG_INT_STS); + DP_NOTICE(p_hwfn->cdev, "DORQ attention. int_sts was %x\n", int_sts); - DP_INFO(p_hwfn->cdev, - "DORQ db_drop: address 0x%08x Opaque FID 0x%04x Size [bytes] 0x%08x Reason: 0x%08x\n", - qed_rd(p_hwfn, p_hwfn->p_dpc_ptt, - DORQ_REG_DB_DROP_DETAILS_ADDRESS), - (u16)(details & QED_DORQ_ATTENTION_OPAQUE_MASK), - GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, - reason); + /* int_sts may be zero since all PFs were interrupted for doorbell + * overflow but another one already handled it. Can abort here. If + * this PF also requires overflow recovery we will be interrupted again. + * The masked almost full indication may also be set. Ignoring. 
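+ * (The almost-full indication is informational only; the handling below keys off DB_DROP and DORQ_FIFO_OVFL_ERR.)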
+ */ + if (!(int_sts & ~DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) + return 0; + + /* check if db_drop or overflow happened */ + if (int_sts & (DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR)) { + /* Obtain data about db drop/overflow */ + first_drop_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_REASON) & + QED_DORQ_ATTENTION_REASON_MASK; + details = qed_rd(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS); + address = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS); + all_drops_reason = qed_rd(p_hwfn, p_ptt, + DORQ_REG_DB_DROP_DETAILS_REASON); + + /* Log info */ + DP_NOTICE(p_hwfn->cdev, + "Doorbell drop occurred\n" + "Address\t\t0x%08x\t(second BAR address)\n" + "FID\t\t0x%04x\t\t(Opaque FID)\n" + "Size\t\t0x%04x\t\t(in bytes)\n" + "1st drop reason\t0x%08x\t(details on first drop since last handling)\n" + "Sticky reasons\t0x%08x\t(all drop reasons since last handling)\n", + address, + GET_FIELD(details, QED_DORQ_ATTENTION_OPAQUE), + GET_FIELD(details, QED_DORQ_ATTENTION_SIZE) * 4, + first_drop_reason, all_drops_reason); + + rc = qed_db_rec_handler(p_hwfn, p_ptt); + qed_periodic_db_rec_start(p_hwfn); + if (rc) + return rc; + + /* Clear the doorbell drop details and prepare for next drop */ + qed_wr(p_hwfn, p_ptt, DORQ_REG_DB_DROP_DETAILS_REL, 0); + + /* Mark interrupt as handled (note: even if drop was due to a different + * reason than overflow we mark as handled) + */ + qed_wr(p_hwfn, + p_ptt, + DORQ_REG_INT_STS_WR, + DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR); + + /* If there are no indications other than drop indications, success */ + if ((int_sts & ~(DORQ_REG_INT_STS_DB_DROP | + DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR | + DORQ_REG_INT_STS_DORQ_FIFO_AFULL)) == 0) + return 0; } + /* Some other indication was present - non-recoverable */ + DP_INFO(p_hwfn, "DORQ fatal attention\n"); + return -EINVAL; } diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index 54b4ee0acfd7..d81a62ebd524 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -190,6 +190,16 @@ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); +/** + * @brief - Doorbell Recovery handler. + * Run DB_REC_REAL_DEAL doorbell recovery in case of PF overflow * (and flush DORQ if needed), otherwise run DB_REC_ONCE. 
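+ * DB_REC_ONCE rings a single registered doorbell to kick the DORQ without replaying the whole list.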
+ * + * @param p_hwfn + * @param p_ptt + */ +int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); + #define QED_CAU_DEF_RX_TIMER_RES 0 #define QED_CAU_DEF_TX_TIMER_RES 0 diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c index c6f4bab67a5f..90afd514ffe1 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c @@ -1085,7 +1085,14 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn, p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable; - return qed_spq_post(p_hwfn, p_ent, NULL); + rc = qed_spq_post(p_hwfn, p_ent, NULL); + if (rc) + return rc; + + rc = qed_db_recovery_add(p_hwfn->cdev, p_tx->doorbell_addr, + &p_tx->db_msg, DB_REC_WIDTH_32B, + DB_REC_KERNEL); + return rc; } static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -1119,9 +1126,11 @@ static int qed_sp_ll2_rx_queue_stop(struct qed_hwfn *p_hwfn, static int qed_sp_ll2_tx_queue_stop(struct qed_hwfn *p_hwfn, struct qed_ll2_info *p_ll2_conn) { + struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_spq_entry *p_ent = NULL; struct qed_sp_init_data init_data; int rc = -EINVAL; + qed_db_recovery_del(p_hwfn->cdev, p_tx->doorbell_addr, &p_tx->db_msg); /* Get SPQ entry */ memset(&init_data, 0, sizeof(init_data)); @@ -1542,6 +1551,13 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle) p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells + qed_db_addr(p_ll2_conn->cid, DQ_DEMS_LEGACY); + /* prepare db data */ + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(p_tx->db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_TX_BD_PROD_CMD); + p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn); if (rc) @@ -1780,7 +1796,6 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, bool b_notify = p_ll2_conn->tx_queue.cur_send_packet->notify_fw; struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue; struct qed_ll2_tx_packet *p_pkt = NULL; - struct core_db_data db_msg = { 0, 0, 0 }; u16 bd_prod; /* If there are missing BDs, don't do anything now */ @@ -1809,24 +1824,19 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn, list_move_tail(&p_pkt->list_entry, &p_tx->active_descq); } - SET_FIELD(db_msg.params, CORE_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); - SET_FIELD(db_msg.params, CORE_DB_DATA_AGG_VAL_SEL, - DQ_XCM_CORE_TX_BD_PROD_CMD); - db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - db_msg.spq_prod = cpu_to_le16(bd_prod); + p_tx->db_msg.spq_prod = cpu_to_le16(bd_prod); /* Make sure the BDs data is updated before ringing the doorbell */ wmb(); - DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&db_msg)); + DIRECT_REG_WR(p_tx->doorbell_addr, *((u32 *)&p_tx->db_msg)); DP_VERBOSE(p_hwfn, (NETIF_MSG_TX_QUEUED | QED_MSG_LL2), "LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n", p_ll2_conn->queue_id, p_ll2_conn->cid, - p_ll2_conn->input.conn_type, db_msg.spq_prod); + p_ll2_conn->input.conn_type, p_tx->db_msg.spq_prod); } int qed_ll2_prepare_tx_packet(void *cxt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h index 1a5c1ae01474..5f01fbd3c073 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h @@ -103,6 +103,7 @@ struct qed_ll2_tx_queue { struct qed_ll2_tx_packet 
cur_completing_packet; u16 cur_completing_bd_idx; void __iomem *doorbell_addr; + struct core_db_data db_msg; u16 bds_idx; u16 cur_send_frag_num; u16 cur_completing_frag_num; diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index fff7f04d4525..6adf5bda9811 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -966,9 +966,47 @@ static void qed_update_pf_params(struct qed_dev *cdev, } } +#define QED_PERIODIC_DB_REC_COUNT 100 +#define QED_PERIODIC_DB_REC_INTERVAL_MS 100 +#define QED_PERIODIC_DB_REC_INTERVAL \ + msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS) +#define QED_PERIODIC_DB_REC_WAIT_COUNT 10 +#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \ + (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT) + +static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn, + enum qed_slowpath_wq_flag wq_flag, + unsigned long delay) +{ + if (!hwfn->slowpath_wq_active) + return -EINVAL; + + /* Memory barrier for setting atomic bit */ + smp_mb__before_atomic(); + set_bit(wq_flag, &hwfn->slowpath_task_flags); + smp_mb__after_atomic(); + queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay); + + return 0; +} + +void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn) +{ + /* Reset periodic Doorbell Recovery counter */ + p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT; + + /* Don't schedule periodic Doorbell Recovery if already scheduled */ + if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &p_hwfn->slowpath_task_flags)) + return; + + qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); +} + static void qed_slowpath_wq_stop(struct qed_dev *cdev) { - int i; + int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT; if (IS_VF(cdev)) return; @@ -977,6 +1015,15 @@ static void qed_slowpath_wq_stop(struct qed_dev *cdev) if (!cdev->hwfns[i].slowpath_wq) continue; + /* Stop queuing new delayed works */ + cdev->hwfns[i].slowpath_wq_active = false; + + /* Wait until the last periodic doorbell recovery is executed */ + while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &cdev->hwfns[i].slowpath_task_flags) && + sleep_count--) + msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL); + flush_workqueue(cdev->hwfns[i].slowpath_wq); destroy_workqueue(cdev->hwfns[i].slowpath_wq); } @@ -989,7 +1036,10 @@ static void qed_slowpath_task(struct work_struct *work) struct qed_ptt *ptt = qed_ptt_acquire(hwfn); if (!ptt) { - queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0); + if (hwfn->slowpath_wq_active) + queue_delayed_work(hwfn->slowpath_wq, + &hwfn->slowpath_task, 0); + return; } @@ -997,6 +1047,15 @@ static void qed_slowpath_task(struct work_struct *work) &hwfn->slowpath_task_flags)) qed_mfw_process_tlv_req(hwfn, ptt); + if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC, + &hwfn->slowpath_task_flags)) { + qed_db_rec_handler(hwfn, ptt); + if (hwfn->periodic_db_rec_count--) + qed_slowpath_delayed_work(hwfn, + QED_SLOWPATH_PERIODIC_DB_REC, + QED_PERIODIC_DB_REC_INTERVAL); + } + qed_ptt_release(hwfn, ptt); } @@ -1023,6 +1082,7 @@ static int qed_slowpath_wq_start(struct qed_dev *cdev) } INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task); + hwfn->slowpath_wq_active = true; } return 0; @@ -1939,21 +1999,30 @@ exit: * 0B | 0x3 [command index] | * 4B | b'0: check_response? 
| b'1-31 reserved | * 8B | File-type | reserved | + * 12B | Image length in bytes | * \----------------------------------------------------------------------/ * Start a new file of the provided type */ static int qed_nvm_flash_image_file_start(struct qed_dev *cdev, const u8 **data, bool *check_resp) { + u32 file_type, file_size = 0; int rc; *data += 4; *check_resp = !!(**data & BIT(0)); *data += 4; + file_type = **data; DP_VERBOSE(cdev, NETIF_MSG_DRV, - "About to start a new file of type %02x\n", **data); - rc = qed_mcp_nvm_put_file_begin(cdev, **data); + "About to start a new file of type %02x\n", file_type); + if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) { + *data += 4; + file_size = *((u32 *)(*data)); + } + + rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type, + (u8 *)(&file_size), 4); *data += 4; return rc; @@ -2315,6 +2384,8 @@ const struct qed_common_ops qed_common_ops_pass = { .update_mac = &qed_update_mac, .update_mtu = &qed_update_mtu, .update_wol = &qed_update_wol, + .db_recovery_add = &qed_db_recovery_add, + .db_recovery_del = &qed_db_recovery_del, .read_module_eeprom = &qed_read_module_eeprom, }; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c index a96364df4320..e7f18e34ff0d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c @@ -1619,7 +1619,7 @@ static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) qed_sp_pf_update_stag(p_hwfn); } - DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", + DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n", p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode); /* Acknowledge the MFW */ @@ -1641,7 +1641,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >> OEM_CFG_CHANNEL_TYPE_OFFSET; if (val != OEM_CFG_CHANNEL_TYPE_STAGGED) - DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val); + DP_NOTICE(p_hwfn, + "Incorrect UFP Channel type %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET; if (val == OEM_CFG_SCHED_TYPE_ETS) { @@ -1650,7 +1652,9 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW; } else { p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN; - DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val); + DP_NOTICE(p_hwfn, + "Unknown UFP scheduling mode %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); } qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); @@ -1665,13 +1669,15 @@ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS; } else { p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN; - DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val); + DP_NOTICE(p_hwfn, + "Unknown Host priority control %d port_id 0x%02x\n", + val, MFW_PORT(p_hwfn)); } DP_NOTICE(p_hwfn, - "UFP shmem config: mode = %d tc = %d pri_type = %d\n", - p_hwfn->ufp_info.mode, - p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type); + "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n", + p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc, + p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn)); } static int @@ -2739,24 +2745,6 @@ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf) return 0; } -int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr) -{ - struct qed_hwfn *p_hwfn = 
QED_LEADING_HWFN(cdev); - struct qed_ptt *p_ptt; - u32 resp, param; - int rc; - - p_ptt = qed_ptt_acquire(p_hwfn); - if (!p_ptt) - return -EBUSY; - rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr, - &resp, &param); - cdev->mcp_nvm_resp = resp; - qed_ptt_release(p_hwfn, p_ptt); - - return rc; -} - int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len) { @@ -2770,6 +2758,9 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, return -EBUSY; switch (cmd) { + case QED_PUT_FILE_BEGIN: + nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; + break; case QED_PUT_FILE_DATA: nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; break; @@ -2782,10 +2773,14 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, goto out; } + buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); while (buf_idx < len) { - buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN); - nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) | - addr) + buf_idx; + if (cmd == QED_PUT_FILE_BEGIN) + nvm_offset = addr; + else + nvm_offset = ((buf_size << + DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) + + buf_idx; rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset, &resp, &param, buf_size, (u32 *)&p_buf[buf_idx]); @@ -2810,7 +2805,19 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000) usleep_range(1000, 2000); - buf_idx += buf_size; + /* For MBI upgrade, MFW response includes the next buffer offset + * to be delivered to MFW. + */ + if (param && cmd == QED_PUT_FILE_DATA) { + buf_idx = QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET); + buf_size = QED_MFW_GET_FIELD(param, + FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE); + } else { + buf_idx += buf_size; + buf_size = min_t(u32, (len - buf_idx), + MCP_DRV_NVM_BUF_LEN); + } } cdev->mcp_nvm_resp = resp; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 1adfe52b3905..eddf67798d6f 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -543,16 +543,6 @@ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Put file begin - * - * @param cdev - * @param addr - nvm offset - * - * @return int - 0 - operation was successful. 
- */ -int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr); - -/** * @brief Check latest response * * @param cdev diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h index 2440970882c4..8939ed6e08b7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h +++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h @@ -1243,6 +1243,56 @@ 0x1701534UL #define TSEM_REG_DBG_FORCE_FRAME \ 0x1701538UL +#define DORQ_REG_PF_USAGE_CNT \ + 0x1009c0UL +#define DORQ_REG_PF_OVFL_STICKY \ + 0x1009d0UL +#define DORQ_REG_DPM_FORCE_ABORT \ + 0x1009d8UL +#define DORQ_REG_INT_STS \ + 0x100180UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR \ + (0x1UL << 0) +#define DORQ_REG_INT_STS_WR \ + 0x100188UL +#define DORQ_REG_DB_DROP_DETAILS_REL \ + 0x100a28UL +#define DORQ_REG_INT_STS_ADDRESS_ERROR_SHIFT \ + 0 +#define DORQ_REG_INT_STS_DB_DROP \ + (0x1UL << 1) +#define DORQ_REG_INT_STS_DB_DROP_SHIFT \ + 1 +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR \ + (0x1UL << 2) +#define DORQ_REG_INT_STS_DORQ_FIFO_OVFL_ERR_SHIFT \ + 2 +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL\ + (0x1UL << 3) +#define DORQ_REG_INT_STS_DORQ_FIFO_AFULL_SHIFT \ + 3 +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR \ + (0x1UL << 4) +#define DORQ_REG_INT_STS_CFC_BYP_VALIDATION_ERR_SHIFT \ + 4 +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR \ + (0x1UL << 5) +#define DORQ_REG_INT_STS_CFC_LD_RESP_ERR_SHIFT \ + 5 +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR \ + (0x1UL << 6) +#define DORQ_REG_INT_STS_XCM_DONE_CNT_ERR_SHIFT \ + 6 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR \ + (0x1UL << 7) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_OVFL_ERR_SHIFT \ + 7 +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR \ + (0x1UL << 8) +#define DORQ_REG_INT_STS_CFC_LD_REQ_FIFO_UNDER_ERR_SHIFT \ + 8 +#define DORQ_REG_DB_DROP_DETAILS_REASON \ + 0x100a20UL #define MSEM_REG_DBG_SELECT \ 0x1801528UL #define MSEM_REG_DBG_DWORD_ENABLE \ diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 3157c0d99441..4179c9013fc6 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -227,7 +227,9 @@ struct qed_spq { u32 comp_count; u32 cid; - qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; + u32 db_addr_offset; + struct core_db_data db_data; + qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE]; }; /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c index 0a9c5bb0fa48..eb88bbc6b193 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_spq.c +++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c @@ -252,9 +252,9 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, struct qed_spq *p_spq, struct qed_spq_entry *p_ent) { struct qed_chain *p_chain = &p_hwfn->p_spq->chain; + struct core_db_data *p_db_data = &p_spq->db_data; u16 echo = qed_chain_get_prod_idx(p_chain); struct slow_path_element *elem; - struct core_db_data db; p_ent->elem.hdr.echo = cpu_to_le16(echo); elem = qed_chain_produce(p_chain); @@ -266,27 +266,22 @@ static int qed_spq_hw_post(struct qed_hwfn *p_hwfn, *elem = p_ent->elem; /* struct assignment */ /* send a doorbell on the slow hwfn session */ - memset(&db, 0, sizeof(db)); - SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM); - SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); - SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, - DQ_XCM_CORE_SPQ_PROD_CMD); - db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; - db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); + 
p_db_data->spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain)); /* make sure the SPQE is updated before the doorbell */ wmb(); - DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db); + DOORBELL(p_hwfn, p_spq->db_addr_offset, *(u32 *)p_db_data); /* make sure doorbell is rung */ wmb(); DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n", - qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), - p_spq->cid, db.params, db.agg_flags, - qed_chain_get_prod_idx(p_chain)); + p_spq->db_addr_offset, + p_spq->cid, + p_db_data->params, + p_db_data->agg_flags, qed_chain_get_prod_idx(p_chain)); return 0; } @@ -490,8 +485,11 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; struct qed_spq_entry *p_virt = NULL; + struct core_db_data *p_db_data; + void __iomem *db_addr; dma_addr_t p_phys = 0; u32 i, capacity; + int rc; INIT_LIST_HEAD(&p_spq->pending); INIT_LIST_HEAD(&p_spq->completion_pending); @@ -528,6 +526,25 @@ void qed_spq_setup(struct qed_hwfn *p_hwfn) /* reset the chain itself */ qed_chain_reset(&p_spq->chain); + + /* Initialize the address/data of the SPQ doorbell */ + p_spq->db_addr_offset = qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY); + p_db_data = &p_spq->db_data; + memset(p_db_data, 0, sizeof(*p_db_data)); + SET_FIELD(p_db_data->params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_MAX); + SET_FIELD(p_db_data->params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + p_db_data->agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* Register the SPQ doorbell with the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + rc = qed_db_recovery_add(p_hwfn->cdev, db_addr, &p_spq->db_data, + DB_REC_WIDTH_32B, DB_REC_KERNEL); + if (rc) + DP_INFO(p_hwfn, + "Failed to register the SPQ doorbell with the doorbell recovery mechanism\n"); } int qed_spq_alloc(struct qed_hwfn *p_hwfn) @@ -575,11 +592,17 @@ spq_allocate_fail: void qed_spq_free(struct qed_hwfn *p_hwfn) { struct qed_spq *p_spq = p_hwfn->p_spq; + void __iomem *db_addr; u32 capacity; if (!p_spq) return; + /* Delete the SPQ doorbell from the doorbell recovery mechanism */ + db_addr = (void __iomem *)((u8 __iomem *)p_hwfn->doorbells + + p_spq->db_addr_offset); + qed_db_recovery_del(p_hwfn->cdev, db_addr, &p_spq->db_data); + if (p_spq->p_virt) { capacity = qed_chain_get_capacity(&p_spq->chain); dma_free_coherent(&p_hwfn->cdev->pdev->dev, diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h index de98a974673b..613249d1e967 100644 --- a/drivers/net/ethernet/qlogic/qede/qede.h +++ b/drivers/net/ethernet/qlogic/qede/qede.h @@ -168,6 +168,13 @@ struct qede_ptp; #define QEDE_RFS_MAX_FLTR 256 +enum qede_flags_bit { + QEDE_FLAGS_IS_VF = 0, + QEDE_FLAGS_LINK_REQUESTED, + QEDE_FLAGS_PTP_TX_IN_PRORGESS, + QEDE_FLAGS_TX_TIMESTAMPING_EN +}; + struct qede_dev { struct qed_dev *cdev; struct net_device *ndev; @@ -177,10 +184,7 @@ struct qede_dev { u8 dp_level; unsigned long flags; -#define QEDE_FLAG_IS_VF BIT(0) -#define IS_VF(edev) (!!((edev)->flags & QEDE_FLAG_IS_VF)) -#define QEDE_TX_TIMESTAMPING_EN BIT(1) -#define QEDE_FLAGS_PTP_TX_IN_PRORGESS BIT(2) +#define IS_VF(edev) (test_bit(QEDE_FLAGS_IS_VF, &(edev)->flags)) const struct qed_eth_ops *ops; struct qede_ptp *ptp; @@ -377,6 +381,7 @@ struct qede_tx_queue { u64 xmit_pkts; u64 stopped_cnt; + u64 tx_mem_alloc_err; __le16 *hw_cons_ptr; diff --git 
a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c index 8cbbd628fd73..16331c6c6fa7 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ethtool.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ethtool.c @@ -73,6 +73,7 @@ static const struct { } qede_tqstats_arr[] = { QEDE_TQSTAT(xmit_pkts), QEDE_TQSTAT(stopped_cnt), + QEDE_TQSTAT(tx_mem_alloc_err), }; #define QEDE_STAT_OFFSET(stat_name, type, base) \ diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c index 1a78027de071..bdf816fe5a16 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_fp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c @@ -1466,8 +1466,8 @@ netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev) #if ((MAX_SKB_FRAGS + 2) > ETH_TX_MAX_BDS_PER_NON_LSO_PACKET) if (qede_pkt_req_lin(skb, xmit_type)) { if (skb_linearize(skb)) { - DP_NOTICE(edev, - "SKB linearization failed - silently dropping this SKB\n"); + txq->tx_mem_alloc_err++; + dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 46d0f2eaa0c0..5a74fcbdbc2b 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -1086,7 +1086,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level, } if (is_vf) - edev->flags |= QEDE_FLAG_IS_VF; + set_bit(QEDE_FLAGS_IS_VF, &edev->flags); qede_init_ndev(edev); @@ -1774,6 +1774,10 @@ static int qede_drain_txq(struct qede_dev *edev, static int qede_stop_txq(struct qede_dev *edev, struct qede_tx_queue *txq, int rss_id) { + /* delete doorbell from doorbell recovery mechanism */ + edev->ops->common->db_recovery_del(edev->cdev, txq->doorbell_addr, + &txq->tx_db); + return edev->ops->q_tx_stop(edev->cdev, rss_id, txq->handle); } @@ -1910,6 +1914,11 @@ static int qede_start_txq(struct qede_dev *edev, DQ_XCM_ETH_TX_BD_PROD_CMD); txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; + /* register doorbell with doorbell recovery mechanism */ + rc = edev->ops->common->db_recovery_add(edev->cdev, txq->doorbell_addr, + &txq->tx_db, DB_REC_WIDTH_32B, + DB_REC_KERNEL); + return rc; } @@ -2057,6 +2066,8 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, if (!is_locked) __qede_lock(edev); + clear_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); + edev->state = QEDE_STATE_CLOSED; qede_rdma_dev_event_close(edev); @@ -2163,6 +2174,8 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, /* Program un-configured VLANs */ qede_configure_vlan_filters(edev); + set_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags); + /* Ask for link-up using current configuration */ memset(&link_params, 0, sizeof(link_params)); link_params.link_up = true; @@ -2258,8 +2271,8 @@ static void qede_link_update(void *dev, struct qed_link_output *link) { struct qede_dev *edev = dev; - if (!netif_running(edev->ndev)) { - DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not running\n"); + if (!test_bit(QEDE_FLAGS_LINK_REQUESTED, &edev->flags)) { + DP_VERBOSE(edev, NETIF_MSG_LINK, "Interface is not ready\n"); return; } diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c index 013ff567283c..5f3f42a25361 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c +++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c @@ -223,12 +223,12 @@ static int qede_ptp_cfg_filters(struct qede_dev *edev) switch (ptp->tx_type) { case 
HWTSTAMP_TX_ON: - edev->flags |= QEDE_TX_TIMESTAMPING_EN; + set_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags); tx_type = QED_PTP_HWTSTAMP_TX_ON; break; case HWTSTAMP_TX_OFF: - edev->flags &= ~QEDE_TX_TIMESTAMPING_EN; + clear_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags); tx_type = QED_PTP_HWTSTAMP_TX_OFF; break; @@ -518,7 +518,7 @@ void qede_ptp_tx_ts(struct qede_dev *edev, struct sk_buff *skb) if (test_and_set_bit_lock(QEDE_FLAGS_PTP_TX_IN_PRORGESS, &edev->flags)) return; - if (unlikely(!(edev->flags & QEDE_TX_TIMESTAMPING_EN))) { + if (unlikely(!test_bit(QEDE_FLAGS_TX_TIMESTAMPING_EN, &edev->flags))) { DP_NOTICE(edev, "Tx timestamping was not enabled, this packet will not be timestamped\n"); } else if (unlikely(ptp->tx_skb)) { diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index d42ba2293d8c..16d0479f6891 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2993,10 +2993,8 @@ int qlcnic_check_temp(struct qlcnic_adapter *adapter) static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring) { int i; - struct cmd_desc_type0 *tx_desc_info; for (i = 0; i < tx_ring->num_desc; i++) { - tx_desc_info = &tx_ring->desc_head[i]; pr_info("TX Desc: %d\n", i); print_hex_dump(KERN_INFO, "TX: ", DUMP_PREFIX_OFFSET, 16, 1, &tx_ring->desc_head[i], @@ -4008,19 +4006,12 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt, int queue_type) { struct net_device *netdev = adapter->netdev; - u8 max_hw_rings = 0; char buf[8]; - int cur_rings; - if (queue_type == QLCNIC_RX_QUEUE) { - max_hw_rings = adapter->max_sds_rings; - cur_rings = adapter->drv_sds_rings; + if (queue_type == QLCNIC_RX_QUEUE) strcpy(buf, "SDS"); - } else if (queue_type == QLCNIC_TX_QUEUE) { - max_hw_rings = adapter->max_tx_rings; - cur_rings = adapter->drv_tx_rings; + else strcpy(buf, "Tx"); - } if (!is_power_of_2(ring_cnt)) { netdev_err(netdev, "%s rings value should be a power of 2\n", diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c index 50eaafa3eaba..af3b037fa442 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c @@ -1067,9 +1067,6 @@ static int qlcnic_sriov_pf_cfg_ip_cmd(struct qlcnic_bc_trans *trans, struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_adapter *adapter = vf->adapter; int err = -EIO; - u8 op; - - op = cmd->req.arg[1] & 0xff; cmd->req.arg[1] |= vf->vp->handle << 16; cmd->req.arg[1] |= BIT_31; @@ -1339,14 +1336,13 @@ static int qlcnic_sriov_pf_get_acl_cmd(struct qlcnic_bc_trans *trans, { struct qlcnic_vf_info *vf = trans->vf; struct qlcnic_vport *vp = vf->vp; - u8 cmd_op, mode = vp->vlan_mode; + u8 mode = vp->vlan_mode; struct qlcnic_adapter *adapter; struct qlcnic_sriov *sriov; adapter = vf->adapter; sriov = adapter->ahw->sriov; - cmd_op = trans->req_hdr->cmd_op; cmd->rsp.arg[0] |= 1 << 25; /* For 84xx adapter in case of PVID , PFD should send vlan mode as diff --git a/drivers/net/ethernet/qualcomm/qca_debug.c b/drivers/net/ethernet/qualcomm/qca_debug.c index a9f1bc013364..bcb890b18a94 100644 --- a/drivers/net/ethernet/qualcomm/qca_debug.c +++ b/drivers/net/ethernet/qualcomm/qca_debug.c @@ -61,6 +61,7 @@ static const char qcaspi_gstrings_stats[][ETH_GSTRING_LEN] = { "Transmit ring full", "SPI errors", "Write verify errors", + "Buffer available errors", }; #ifdef CONFIG_DEBUG_FS @@ -125,19 +126,7 @@ 
qcaspi_info_show(struct seq_file *s, void *what) return 0; } - -static int -qcaspi_info_open(struct inode *inode, struct file *file) -{ - return single_open(file, qcaspi_info_show, inode->i_private); -} - -static const struct file_operations qcaspi_info_ops = { - .open = qcaspi_info_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(qcaspi_info); void qcaspi_init_device_debugfs(struct qcaspi *qca) @@ -153,7 +142,7 @@ qcaspi_init_device_debugfs(struct qcaspi *qca) return; } debugfs_create_file("info", S_IFREG | 0444, device_root, qca, - &qcaspi_info_ops); + &qcaspi_info_fops); } void diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c index d5310504f436..97f92953bdb9 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.c +++ b/drivers/net/ethernet/qualcomm/qca_spi.c @@ -289,6 +289,14 @@ qcaspi_transmit(struct qcaspi *qca) qcaspi_read_register(qca, SPI_REG_WRBUF_SPC_AVA, &available); + if (available > QCASPI_HW_BUF_LEN) { + /* This can only happen due to interference on the SPI line, + * so retry later ... + */ + qca->stats.buf_avail_err++; + return -1; + } + while (qca->txr.skb[qca->txr.head]) { pkt_len = qca->txr.skb[qca->txr.head]->len + QCASPI_HW_PKT_LEN; @@ -355,7 +363,13 @@ qcaspi_receive(struct qcaspi *qca) netdev_dbg(net_dev, "qcaspi_receive: SPI_REG_RDBUF_BYTE_AVA: Value: %08x\n", available); - if (available == 0) { + if (available > QCASPI_HW_BUF_LEN) { + /* This can only happen due to interference on the SPI line, + * so retry later ... + */ + qca->stats.buf_avail_err++; + return -1; + } else if (available == 0) { netdev_dbg(net_dev, "qcaspi_receive called without any data being available!\n"); return -1; } diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index 2d2c49726492..eb9af45fcc5e 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h @@ -74,6 +74,7 @@ struct qcaspi_stats { u64 ring_full; u64 spi_err; u64 write_verify_failed; + u64 buf_avail_err; }; struct qcaspi { diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c index 5f4e447c5dce..b8bbee645f51 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c @@ -301,10 +301,13 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[], struct rmnet_port *port; u16 mux_id; + if (!dev) + return -ENODEV; + real_dev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); - if (!real_dev || !dev || !rmnet_is_real_dev_registered(real_dev)) + if (!real_dev || !rmnet_is_real_dev_registered(real_dev)) return -ENODEV; port = rmnet_get_port_rtnl(real_dev); diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c index 3ee8ae9b6838..f6cf59aee212 100644 --- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c +++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c @@ -20,17 +20,12 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, struct rmnet_port *port, int enable) { - struct rmnet_map_control_command *cmd; struct rmnet_endpoint *ep; struct net_device *vnd; - u16 ip_family; - u16 fc_seq; - u32 qos_id; u8 mux_id; int r; mux_id = RMNET_MAP_GET_MUX_ID(skb); - cmd = RMNET_MAP_GET_CMD_START(skb); if (mux_id >= RMNET_MAX_LOGICAL_EP) { kfree_skb(skb); @@ -45,10 +40,6 @@ static u8 rmnet_map_do_flow_control(struct sk_buff *skb, 
vnd = ep->egress_dev; - ip_family = cmd->flow_control.ip_family; - fc_seq = ntohs(cmd->flow_control.flow_control_seq_num); - qos_id = ntohl(cmd->flow_control.qos_id); - /* Ignore the ip family and pass the sequence number for both v4 and v6 * sequence. User space does not support creating dedicated flows for * the 2 protocols diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index fe2d754c6c8e..99bc3de906e2 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -56,13 +56,6 @@ #define R8169_MSG_DEFAULT \ (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN) -#define TX_SLOTS_AVAIL(tp) \ - (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx) - -/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ -#define TX_FRAGS_READY_FOR(tp,nr_frags) \ - (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1)) - /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast). The RTL chips use a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; @@ -212,24 +205,24 @@ enum cfg_version { }; static const struct pci_device_id rtl8169_pci_tbl[] = { - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8161), 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_NCUBE, 0x8168), 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 }, - { PCI_VENDOR_ID_DLINK, 0x4300, - PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 }, - { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 }, + { PCI_VDEVICE(REALTEK, 0x8129), RTL_CFG_0 }, + { PCI_VDEVICE(REALTEK, 0x8136), RTL_CFG_2 }, + { PCI_VDEVICE(REALTEK, 0x8161), RTL_CFG_1 }, + { PCI_VDEVICE(REALTEK, 0x8167), RTL_CFG_0 }, + { PCI_VDEVICE(REALTEK, 0x8168), RTL_CFG_1 }, + { PCI_VDEVICE(NCUBE, 0x8168), RTL_CFG_1 }, + { PCI_VDEVICE(REALTEK, 0x8169), RTL_CFG_0 }, + { PCI_VENDOR_ID_DLINK, 0x4300, + PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 }, + { PCI_VDEVICE(DLINK, 0x4300), RTL_CFG_0 }, + { PCI_VDEVICE(DLINK, 0x4302), RTL_CFG_0 }, + { PCI_VDEVICE(AT, 0xc107), RTL_CFG_0 }, + { PCI_VDEVICE(USR, 0x0116), RTL_CFG_0 }, { PCI_VENDOR_ID_LINKSYS, 0x1032, PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 }, { 0x0001, 0x8168, PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 }, - {0,}, + {} }; MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl); @@ -603,7 +596,6 @@ struct RxDesc { struct ring_info { struct sk_buff *skb; u32 len; - u8 __pad[sizeof(void *) - sizeof(u32)]; }; struct rtl8169_counters { @@ -661,7 +653,7 @@ struct rtl8169_private { struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */ u16 cp_cmd; - u16 event_slow; + u16 irq_mask; const struct rtl_coalesce_info *coalesce_info; struct clk *clk; @@ -1102,23 +1094,6 @@ static u32 r8168ep_ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) return rtl_eri_read(tp, reg, ERIAR_OOB); } -static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - return r8168dp_ocp_read(tp, mask, reg); - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case 
RTL_GIGA_MAC_VER_51: - return r8168ep_ocp_read(tp, mask, reg); - default: - BUG(); - return ~0; - } -} - static void r8168dp_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) { @@ -1134,30 +1109,11 @@ static void r8168ep_ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, data, ERIAR_OOB); } -static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data) -{ - switch (tp->mac_version) { - case RTL_GIGA_MAC_VER_27: - case RTL_GIGA_MAC_VER_28: - case RTL_GIGA_MAC_VER_31: - r8168dp_ocp_write(tp, mask, reg, data); - break; - case RTL_GIGA_MAC_VER_49: - case RTL_GIGA_MAC_VER_50: - case RTL_GIGA_MAC_VER_51: - r8168ep_ocp_write(tp, mask, reg, data); - break; - default: - BUG(); - break; - } -} - -static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd) +static void r8168dp_oob_notify(struct rtl8169_private *tp, u8 cmd) { rtl_eri_write(tp, 0xe8, ERIAR_MASK_0001, cmd, ERIAR_EXGMAC); - ocp_write(tp, 0x1, 0x30, 0x00000001); + r8168dp_ocp_write(tp, 0x1, 0x30, 0x00000001); } #define OOB_CMD_RESET 0x00 @@ -1169,18 +1125,18 @@ static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp) return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10; } -DECLARE_RTL_COND(rtl_ocp_read_cond) +DECLARE_RTL_COND(rtl_dp_ocp_read_cond) { u16 reg; reg = rtl8168_get_ocp_reg(tp); - return ocp_read(tp, 0x0f, reg) & 0x00000800; + return r8168dp_ocp_read(tp, 0x0f, reg) & 0x00000800; } DECLARE_RTL_COND(rtl_ep_ocp_read_cond) { - return ocp_read(tp, 0x0f, 0x124) & 0x00000001; + return r8168ep_ocp_read(tp, 0x0f, 0x124) & 0x00000001; } DECLARE_RTL_COND(rtl_ocp_tx_cond) @@ -1198,14 +1154,15 @@ static void rtl8168ep_stop_cmac(struct rtl8169_private *tp) static void rtl8168dp_driver_start(struct rtl8169_private *tp) { - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START); - rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10); + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_START); + rtl_msleep_loop_wait_high(tp, &rtl_dp_ocp_read_cond, 10, 10); } static void rtl8168ep_driver_start(struct rtl8169_private *tp) { - ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); - ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_START); + r8168ep_ocp_write(tp, 0x01, 0x30, + r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01); rtl_msleep_loop_wait_high(tp, &rtl_ep_ocp_read_cond, 10, 10); } @@ -1230,15 +1187,16 @@ static void rtl8168_driver_start(struct rtl8169_private *tp) static void rtl8168dp_driver_stop(struct rtl8169_private *tp) { - rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP); - rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10); + r8168dp_oob_notify(tp, OOB_CMD_DRIVER_STOP); + rtl_msleep_loop_wait_low(tp, &rtl_dp_ocp_read_cond, 10, 10); } static void rtl8168ep_driver_stop(struct rtl8169_private *tp) { rtl8168ep_stop_cmac(tp); - ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); - ocp_write(tp, 0x01, 0x30, ocp_read(tp, 0x01, 0x30) | 0x01); + r8168ep_ocp_write(tp, 0x01, 0x180, OOB_CMD_DRIVER_STOP); + r8168ep_ocp_write(tp, 0x01, 0x30, + r8168ep_ocp_read(tp, 0x01, 0x30) | 0x01); rtl_msleep_loop_wait_low(tp, &rtl_ep_ocp_read_cond, 10, 10); } @@ -1265,12 +1223,12 @@ static bool r8168dp_check_dash(struct rtl8169_private *tp) { u16 reg = rtl8168_get_ocp_reg(tp); - return !!(ocp_read(tp, 0x0f, reg) & 0x00008000); + return !!(r8168dp_ocp_read(tp, 0x0f, reg) & 0x00008000); } static bool r8168ep_check_dash(struct rtl8169_private *tp) { - return !!(ocp_read(tp, 0x0f, 0x128) & 0x00000001); + return !!(r8168ep_ocp_read(tp, 0x0f, 0x128) & 0x00000001); } static bool 
r8168_check_dash(struct rtl8169_private *tp) @@ -1325,27 +1283,20 @@ static u16 rtl_get_events(struct rtl8169_private *tp) static void rtl_ack_events(struct rtl8169_private *tp, u16 bits) { RTL_W16(tp, IntrStatus, bits); - mmiowb(); } static void rtl_irq_disable(struct rtl8169_private *tp) { RTL_W16(tp, IntrMask, 0); - mmiowb(); -} - -static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits) -{ - RTL_W16(tp, IntrMask, bits); } #define RTL_EVENT_NAPI_RX (RxOK | RxErr) #define RTL_EVENT_NAPI_TX (TxOK | TxErr) #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX) -static void rtl_irq_enable_all(struct rtl8169_private *tp) +static void rtl_irq_enable(struct rtl8169_private *tp) { - rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow); + RTL_W16(tp, IntrMask, tp->irq_mask); } static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp) @@ -2051,8 +2002,7 @@ static const struct ethtool_ops rtl8169_ethtool_ops = { .set_link_ksettings = phy_ethtool_set_link_ksettings, }; -static void rtl8169_get_mac_version(struct rtl8169_private *tp, - u8 default_version) +static void rtl8169_get_mac_version(struct rtl8169_private *tp) { /* * The driver currently handles the 8168Bf and the 8168Be identically @@ -2066,120 +2016,107 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp, * (RTL_R32(tp, TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec */ static const struct rtl_mac_info { - u32 mask; - u32 val; - int mac_version; + u16 mask; + u16 val; + u16 mac_version; } mac_info[] = { /* 8168EP family. */ - { 0x7cf00000, 0x50200000, RTL_GIGA_MAC_VER_51 }, - { 0x7cf00000, 0x50100000, RTL_GIGA_MAC_VER_50 }, - { 0x7cf00000, 0x50000000, RTL_GIGA_MAC_VER_49 }, + { 0x7cf, 0x502, RTL_GIGA_MAC_VER_51 }, + { 0x7cf, 0x501, RTL_GIGA_MAC_VER_50 }, + { 0x7cf, 0x500, RTL_GIGA_MAC_VER_49 }, /* 8168H family. */ - { 0x7cf00000, 0x54100000, RTL_GIGA_MAC_VER_46 }, - { 0x7cf00000, 0x54000000, RTL_GIGA_MAC_VER_45 }, + { 0x7cf, 0x541, RTL_GIGA_MAC_VER_46 }, + { 0x7cf, 0x540, RTL_GIGA_MAC_VER_45 }, /* 8168G family. */ - { 0x7cf00000, 0x5c800000, RTL_GIGA_MAC_VER_44 }, - { 0x7cf00000, 0x50900000, RTL_GIGA_MAC_VER_42 }, - { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 }, - { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 }, + { 0x7cf, 0x5c8, RTL_GIGA_MAC_VER_44 }, + { 0x7cf, 0x509, RTL_GIGA_MAC_VER_42 }, + { 0x7cf, 0x4c1, RTL_GIGA_MAC_VER_41 }, + { 0x7cf, 0x4c0, RTL_GIGA_MAC_VER_40 }, /* 8168F family. */ - { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 }, - { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 }, - { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 }, + { 0x7c8, 0x488, RTL_GIGA_MAC_VER_38 }, + { 0x7cf, 0x481, RTL_GIGA_MAC_VER_36 }, + { 0x7cf, 0x480, RTL_GIGA_MAC_VER_35 }, /* 8168E family. */ - { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 }, - { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 }, - { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 }, + { 0x7c8, 0x2c8, RTL_GIGA_MAC_VER_34 }, + { 0x7cf, 0x2c1, RTL_GIGA_MAC_VER_32 }, + { 0x7c8, 0x2c0, RTL_GIGA_MAC_VER_33 }, /* 8168D family. */ - { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, - { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, + { 0x7cf, 0x281, RTL_GIGA_MAC_VER_25 }, + { 0x7c8, 0x280, RTL_GIGA_MAC_VER_26 }, /* 8168DP family. */ - { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 }, - { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 }, - { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 }, + { 0x7cf, 0x288, RTL_GIGA_MAC_VER_27 }, + { 0x7cf, 0x28a, RTL_GIGA_MAC_VER_28 }, + { 0x7cf, 0x28b, RTL_GIGA_MAC_VER_31 }, /* 8168C family. 
*/ - { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, - { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 }, - { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 }, - { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 }, - { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 }, - { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 }, - { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 }, + { 0x7cf, 0x3c9, RTL_GIGA_MAC_VER_23 }, + { 0x7cf, 0x3c8, RTL_GIGA_MAC_VER_18 }, + { 0x7c8, 0x3c8, RTL_GIGA_MAC_VER_24 }, + { 0x7cf, 0x3c0, RTL_GIGA_MAC_VER_19 }, + { 0x7cf, 0x3c2, RTL_GIGA_MAC_VER_20 }, + { 0x7cf, 0x3c3, RTL_GIGA_MAC_VER_21 }, + { 0x7c8, 0x3c0, RTL_GIGA_MAC_VER_22 }, /* 8168B family. */ - { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 }, - { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 }, - { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 }, + { 0x7cf, 0x380, RTL_GIGA_MAC_VER_12 }, + { 0x7c8, 0x380, RTL_GIGA_MAC_VER_17 }, + { 0x7c8, 0x300, RTL_GIGA_MAC_VER_11 }, /* 8101 family. */ - { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 }, - { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 }, - { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 }, - { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 }, - { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 }, - { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 }, - { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 }, - { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 }, - { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 }, - { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 }, - { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 }, - { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 }, - { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 }, - { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x448, RTL_GIGA_MAC_VER_39 }, + { 0x7c8, 0x440, RTL_GIGA_MAC_VER_37 }, + { 0x7cf, 0x409, RTL_GIGA_MAC_VER_29 }, + { 0x7c8, 0x408, RTL_GIGA_MAC_VER_30 }, + { 0x7cf, 0x349, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x249, RTL_GIGA_MAC_VER_08 }, + { 0x7cf, 0x348, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x248, RTL_GIGA_MAC_VER_07 }, + { 0x7cf, 0x340, RTL_GIGA_MAC_VER_13 }, + { 0x7cf, 0x343, RTL_GIGA_MAC_VER_10 }, + { 0x7cf, 0x342, RTL_GIGA_MAC_VER_16 }, + { 0x7c8, 0x348, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x248, RTL_GIGA_MAC_VER_09 }, + { 0x7c8, 0x340, RTL_GIGA_MAC_VER_16 }, /* FIXME: where did these entries come from ? -- FR */ - { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 }, - { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 }, + { 0xfc8, 0x388, RTL_GIGA_MAC_VER_15 }, + { 0xfc8, 0x308, RTL_GIGA_MAC_VER_14 }, /* 8110 family. 
*/ - { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 }, - { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 }, - { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 }, - { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 }, - { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 }, - { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 }, + { 0xfc8, 0x980, RTL_GIGA_MAC_VER_06 }, + { 0xfc8, 0x180, RTL_GIGA_MAC_VER_05 }, + { 0xfc8, 0x100, RTL_GIGA_MAC_VER_04 }, + { 0xfc8, 0x040, RTL_GIGA_MAC_VER_03 }, + { 0xfc8, 0x008, RTL_GIGA_MAC_VER_02 }, + { 0xfc8, 0x000, RTL_GIGA_MAC_VER_01 }, /* Catch-all */ - { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE } + { 0x000, 0x000, RTL_GIGA_MAC_NONE } }; const struct rtl_mac_info *p = mac_info; - u32 reg; + u16 reg = RTL_R32(tp, TxConfig) >> 20; - reg = RTL_R32(tp, TxConfig); while ((reg & p->mask) != p->val) p++; tp->mac_version = p->mac_version; if (tp->mac_version == RTL_GIGA_MAC_NONE) { - dev_notice(tp_to_dev(tp), - "unknown MAC, using family default\n"); - tp->mac_version = default_version; - } else if (tp->mac_version == RTL_GIGA_MAC_VER_42) { - tp->mac_version = tp->supports_gmii ? - RTL_GIGA_MAC_VER_42 : - RTL_GIGA_MAC_VER_43; - } else if (tp->mac_version == RTL_GIGA_MAC_VER_45) { - tp->mac_version = tp->supports_gmii ? - RTL_GIGA_MAC_VER_45 : - RTL_GIGA_MAC_VER_47; - } else if (tp->mac_version == RTL_GIGA_MAC_VER_46) { - tp->mac_version = tp->supports_gmii ? - RTL_GIGA_MAC_VER_46 : - RTL_GIGA_MAC_VER_48; + dev_err(tp_to_dev(tp), "unknown chip XID %03x\n", reg & 0xfcf); + } else if (!tp->supports_gmii) { + if (tp->mac_version == RTL_GIGA_MAC_VER_42) + tp->mac_version = RTL_GIGA_MAC_VER_43; + else if (tp->mac_version == RTL_GIGA_MAC_VER_45) + tp->mac_version = RTL_GIGA_MAC_VER_47; + else if (tp->mac_version == RTL_GIGA_MAC_VER_46) + tp->mac_version = RTL_GIGA_MAC_VER_48; } } -static void rtl8169_print_mac_version(struct rtl8169_private *tp) -{ - netif_dbg(tp, drv, tp->dev, "mac_version = 0x%02x\n", tp->mac_version); -} - struct phy_reg { u16 reg; u16 val; @@ -3902,8 +3839,6 @@ static void rtl_hw_phy_config(struct net_device *dev) { struct rtl8169_private *tp = netdev_priv(dev); - rtl8169_print_mac_version(tp); - switch (tp->mac_version) { case RTL_GIGA_MAC_VER_01: break; @@ -4643,7 +4578,7 @@ static void rtl_hw_start(struct rtl8169_private *tp) rtl_set_rx_mode(tp->dev); /* no early-rx interrupts */ RTL_W16(tp, MultiIntr, RTL_R16(tp, MultiIntr) & 0xf000); - rtl_irq_enable_all(tp); + rtl_irq_enable(tp); } static void rtl_hw_start_8169(struct rtl8169_private *tp) @@ -5394,8 +5329,8 @@ static void rtl_hw_start_8168(struct rtl8169_private *tp) /* Work around for RxFIFO overflow. 
*/ if (tp->mac_version == RTL_GIGA_MAC_VER_11) { - tp->event_slow |= RxFIFOOver | PCSTimeout; - tp->event_slow &= ~RxOverflow; + tp->irq_mask |= RxFIFOOver; + tp->irq_mask &= ~RxOverflow; } switch (tp->mac_version) { @@ -5632,7 +5567,7 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp) static void rtl_hw_start_8101(struct rtl8169_private *tp) { if (tp->mac_version >= RTL_GIGA_MAC_VER_30) - tp->event_slow &= ~RxFIFOOver; + tp->irq_mask &= ~RxFIFOOver; if (tp->mac_version == RTL_GIGA_MAC_VER_13 || tp->mac_version == RTL_GIGA_MAC_VER_16) @@ -5888,6 +5823,16 @@ static void rtl8169_tx_timeout(struct net_device *dev) rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } +static __le32 rtl8169_get_txd_opts1(u32 opts0, u32 len, unsigned int entry) +{ + u32 status = opts0 | len; + + if (entry == NUM_TX_DESC - 1) + status |= RingEnd; + + return cpu_to_le32(status); +} + static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, u32 *opts) { @@ -5900,7 +5845,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) { const skb_frag_t *frag = info->frags + cur_frag; dma_addr_t mapping; - u32 status, len; + u32 len; void *addr; entry = (entry + 1) % NUM_TX_DESC; @@ -5916,11 +5861,7 @@ static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb, goto err_out; } - /* Anti gcc 2.95.3 bugware (sic) */ - status = opts[0] | len | - (RingEnd * !((entry + 1) % NUM_TX_DESC)); - - txd->opts1 = cpu_to_le32(status); + txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry); txd->opts2 = cpu_to_le32(opts[1]); txd->addr = cpu_to_le64(mapping); @@ -6108,6 +6049,15 @@ static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp, return true; } +static bool rtl_tx_slots_avail(struct rtl8169_private *tp, + unsigned int nr_frags) +{ + unsigned int slots_avail = tp->dirty_tx + NUM_TX_DESC - tp->cur_tx; + + /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */ + return slots_avail > nr_frags; +} + static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev) { @@ -6116,11 +6066,11 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, struct TxDesc *txd = tp->TxDescArray + entry; struct device *d = tp_to_dev(tp); dma_addr_t mapping; - u32 status, len; - u32 opts[2]; + u32 opts[2], len; + bool stop_queue; int frags; - if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) { + if (unlikely(!rtl_tx_slots_avail(tp, skb_shinfo(skb)->nr_frags))) { netif_err(tp, drv, dev, "BUG! 
Tx Ring full when queue awake!\n"); goto err_stop_0; } @@ -6159,32 +6109,26 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, txd->opts2 = cpu_to_le32(opts[1]); - netdev_sent_queue(dev, skb->len); - skb_tx_timestamp(skb); /* Force memory writes to complete before releasing descriptor */ dma_wmb(); - /* Anti gcc 2.95.3 bugware (sic) */ - status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC)); - txd->opts1 = cpu_to_le32(status); + txd->opts1 = rtl8169_get_txd_opts1(opts[0], len, entry); /* Force all memory writes to complete before notifying device */ wmb(); tp->cur_tx += frags + 1; - RTL_W8(tp, TxPoll, NPQ); + stop_queue = !rtl_tx_slots_avail(tp, MAX_SKB_FRAGS); + if (unlikely(stop_queue)) + netif_stop_queue(dev); - mmiowb(); + if (__netdev_sent_queue(dev, skb->len, skb->xmit_more)) + RTL_W8(tp, TxPoll, NPQ); - if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { - /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must - * not miss a ring update when it notices a stopped queue. - */ - smp_wmb(); - netif_stop_queue(dev); + if (unlikely(stop_queue)) { /* Sync with rtl_tx: * - publish queue status and cur_tx ring index (write barrier) * - refresh dirty_tx ring index (read barrier). @@ -6193,7 +6137,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, * can't. */ smp_mb(); - if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) + if (rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) netif_wake_queue(dev); } @@ -6257,7 +6201,8 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev) rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING); } -static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) +static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp, + int budget) { unsigned int dirty_tx, tx_left, bytes_compl = 0, pkts_compl = 0; @@ -6285,7 +6230,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) if (status & LastFrag) { pkts_compl++; bytes_compl += tx_skb->skb->len; - dev_consume_skb_any(tx_skb->skb); + napi_consume_skb(tx_skb->skb, budget); tx_skb->skb = NULL; } dirty_tx++; @@ -6310,7 +6255,7 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp) */ smp_mb(); if (netif_queue_stopped(dev) && - TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) { + rtl_tx_slots_avail(tp, MAX_SKB_FRAGS)) { netif_wake_queue(dev); } /* @@ -6460,8 +6405,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance) { struct rtl8169_private *tp = dev_instance; u16 status = rtl_get_events(tp); + u16 irq_mask = RTL_R16(tp, IntrMask); - if (status == 0xffff || !(status & (RTL_EVENT_NAPI | tp->event_slow))) + if (status == 0xffff || !(status & irq_mask)) return IRQ_NONE; if (unlikely(status & SYSErr)) { @@ -6528,13 +6474,11 @@ static int rtl8169_poll(struct napi_struct *napi, int budget) work_done = rtl_rx(dev, tp, (u32) budget); - rtl_tx(dev, tp); + rtl_tx(dev, tp, budget); if (work_done < budget) { napi_complete_done(napi, work_done); - - rtl_irq_enable_all(tp); - mmiowb(); + rtl_irq_enable(tp); } return work_done; @@ -6584,7 +6528,7 @@ static int r8169_phy_connect(struct rtl8169_private *tp) phy_set_max_speed(phydev, SPEED_100); /* Ensure to advertise everything, incl. 
pause */ - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); phy_attached_info(phydev); @@ -6824,8 +6768,7 @@ static void rtl8169_net_suspend(struct net_device *dev) static int rtl8169_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct rtl8169_private *tp = netdev_priv(dev); rtl8169_net_suspend(dev); @@ -6855,8 +6798,7 @@ static void __rtl8169_resume(struct net_device *dev) static int rtl8169_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct rtl8169_private *tp = netdev_priv(dev); clk_prepare_enable(tp->clk); @@ -6869,8 +6811,7 @@ static int rtl8169_resume(struct device *device) static int rtl8169_runtime_suspend(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct rtl8169_private *tp = netdev_priv(dev); if (!tp->TxDescArray) @@ -6891,8 +6832,7 @@ static int rtl8169_runtime_suspend(struct device *device) static int rtl8169_runtime_resume(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); struct rtl8169_private *tp = netdev_priv(dev); rtl_rar_set(tp, dev->dev_addr); @@ -6910,8 +6850,7 @@ static int rtl8169_runtime_resume(struct device *device) static int rtl8169_runtime_idle(struct device *device) { - struct pci_dev *pdev = to_pci_dev(device); - struct net_device *dev = pci_get_drvdata(pdev); + struct net_device *dev = dev_get_drvdata(device); if (!netif_running(dev) || !netif_carrier_ok(dev)) pm_schedule_suspend(device, 10000); @@ -7023,31 +6962,26 @@ static const struct net_device_ops rtl_netdev_ops = { static const struct rtl_cfg_info { void (*hw_start)(struct rtl8169_private *tp); - u16 event_slow; + u16 irq_mask; unsigned int has_gmii:1; const struct rtl_coalesce_info *coalesce_info; - u8 default_ver; } rtl_cfg_infos [] = { [RTL_CFG_0] = { .hw_start = rtl_hw_start_8169, - .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver, + .irq_mask = SYSErr | LinkChg | RxOverflow | RxFIFOOver, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8169, - .default_ver = RTL_GIGA_MAC_VER_01, }, [RTL_CFG_1] = { .hw_start = rtl_hw_start_8168, - .event_slow = SYSErr | LinkChg | RxOverflow, + .irq_mask = LinkChg | RxOverflow, .has_gmii = 1, .coalesce_info = rtl_coalesce_info_8168_8136, - .default_ver = RTL_GIGA_MAC_VER_11, }, [RTL_CFG_2] = { .hw_start = rtl_hw_start_8101, - .event_slow = SYSErr | LinkChg | RxOverflow | RxFIFOOver | - PCSTimeout, + .irq_mask = LinkChg | RxOverflow | RxFIFOOver, .coalesce_info = rtl_coalesce_info_8168_8136, - .default_ver = RTL_GIGA_MAC_VER_13, } }; @@ -7309,11 +7243,10 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) tp->mmio_addr = pcim_iomap_table(pdev)[region]; - if (!pci_is_pcie(pdev)) - dev_info(&pdev->dev, "not PCI Express\n"); - /* Identify chip attached to board */ - rtl8169_get_mac_version(tp, cfg->default_ver); + rtl8169_get_mac_version(tp); + if (tp->mac_version == RTL_GIGA_MAC_NONE) + return -ENODEV; if (rtl_tbi_enabled(tp)) { dev_err(&pdev->dev, "TBI fiber mode not supported\n"); @@ -7351,8 +7284,6 @@ static int rtl_init_one(struct pci_dev *pdev, const struct 
pci_device_id *ent) rtl_init_mdio_ops(tp); rtl_init_jumbo_ops(tp); - rtl8169_print_mac_version(tp); - chipset = tp->mac_version; rc = rtl_alloc_irq(tp); @@ -7426,7 +7357,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) dev->max_mtu = jumbo_max; tp->hw_start = cfg->hw_start; - tp->event_slow = cfg->event_slow; + tp->irq_mask = RTL_EVENT_NAPI | cfg->irq_mask; tp->coalesce_info = cfg->coalesce_info; tp->rtl_fw = RTL_FIRMWARE_UNKNOWN; @@ -7450,9 +7381,9 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (rc) goto err_mdio_unregister; - netif_info(tp, probe, dev, "%s, %pM, XID %08x, IRQ %d\n", + netif_info(tp, probe, dev, "%s, %pM, XID %03x, IRQ %d\n", rtl_chip_infos[chipset].name, dev->dev_addr, - (u32)(RTL_R32(tp, TxConfig) & 0xfcf0f8ff), + (RTL_R32(tp, TxConfig) >> 20) & 0xfcf, pci_irq_vector(pdev, 0)); if (jumbo_max > JUMBO_1K) diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 1c6e4df94f01..ac9195add811 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -1032,7 +1032,6 @@ struct ravb_private { phy_interface_t phy_interface; int msg_enable; int speed; - int duplex; int emac_irq; enum ravb_chip_id chip_id; int rx_irqs[NUM_RX_QUEUE]; diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index defed0d0c51d..ffc1ada4e6da 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -82,13 +82,6 @@ static int ravb_config(struct net_device *ndev) return error; } -static void ravb_set_duplex(struct net_device *ndev) -{ - struct ravb_private *priv = netdev_priv(ndev); - - ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex ? ECMR_DM : 0); -} - static void ravb_set_rate(struct net_device *ndev) { struct ravb_private *priv = netdev_priv(ndev); @@ -406,13 +399,11 @@ error: /* E-MAC init function */ static void ravb_emac_init(struct net_device *ndev) { - struct ravb_private *priv = netdev_priv(ndev); - /* Receive frame limit set register */ ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ - ravb_write(ndev, ECMR_ZPF | (priv->duplex ? ECMR_DM : 0) | + ravb_write(ndev, ECMR_ZPF | ECMR_DM | (ndev->features & NETIF_F_RXCSUM ? 
ECMR_RCSC : 0) | ECMR_TE | ECMR_RE, ECMR); @@ -995,12 +986,6 @@ static void ravb_adjust_link(struct net_device *ndev) ravb_rcv_snd_disable(ndev); if (phydev->link) { - if (phydev->duplex != priv->duplex) { - new_state = true; - priv->duplex = phydev->duplex; - ravb_set_duplex(ndev); - } - if (phydev->speed != priv->speed) { new_state = true; priv->speed = phydev->speed; @@ -1015,7 +1000,6 @@ static void ravb_adjust_link(struct net_device *ndev) new_state = true; priv->link = 0; priv->speed = 0; - priv->duplex = -1; } /* Enable TX and RX right over here, if E-MAC change is ignored */ @@ -1045,7 +1029,6 @@ static int ravb_phy_init(struct net_device *ndev) priv->link = 0; priv->speed = 0; - priv->duplex = -1; /* Try connecting to PHY */ pn = of_parse_phandle(np, "phy-handle", 0); @@ -1088,6 +1071,10 @@ static int ravb_phy_init(struct net_device *ndev) phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Pause_BIT); phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_Asym_Pause_BIT); + /* Half Duplex is not supported */ + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT); + phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); + phy_attached_info(phydev); return 0; diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index beb06628f22d..6213827e3956 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1632,9 +1632,6 @@ rocker_world_port_obj_vlan_add(struct rocker_port *rocker_port, { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (netif_is_bridge_master(vlan->obj.orig_dev)) - return -EOPNOTSUPP; - if (!wops->port_obj_vlan_add) return -EOPNOTSUPP; @@ -2145,8 +2142,6 @@ static int rocker_port_obj_del(struct net_device *dev, static const struct switchdev_ops rocker_port_switchdev_ops = { .switchdev_port_attr_get = rocker_port_attr_get, .switchdev_port_attr_set = rocker_port_attr_set, - .switchdev_port_obj_add = rocker_port_obj_add, - .switchdev_port_obj_del = rocker_port_obj_del, }; struct rocker_fib_event_work { @@ -2812,12 +2807,54 @@ static int rocker_switchdev_event(struct notifier_block *unused, return NOTIFY_DONE; } +static int +rocker_switchdev_port_obj_event(unsigned long event, struct net_device *netdev, + struct switchdev_notifier_port_obj_info *port_obj_info) +{ + int err = -EOPNOTSUPP; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + err = rocker_port_obj_add(netdev, port_obj_info->obj, + port_obj_info->trans); + break; + case SWITCHDEV_PORT_OBJ_DEL: + err = rocker_port_obj_del(netdev, port_obj_info->obj); + break; + } + + port_obj_info->handled = true; + return notifier_from_errno(err); +} + +static int rocker_switchdev_blocking_event(struct notifier_block *unused, + unsigned long event, void *ptr) +{ + struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + + if (!rocker_port_dev_check(dev)) + return NOTIFY_DONE; + + switch (event) { + case SWITCHDEV_PORT_OBJ_ADD: + case SWITCHDEV_PORT_OBJ_DEL: + return rocker_switchdev_port_obj_event(event, dev, ptr); + } + + return NOTIFY_DONE; +} + static struct notifier_block rocker_switchdev_notifier = { .notifier_call = rocker_switchdev_event, }; +static struct notifier_block rocker_switchdev_blocking_notifier = { + .notifier_call = rocker_switchdev_blocking_event, +}; + static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct notifier_block *nb; struct rocker *rocker; int err; @@ -2933,6 +2970,13 @@ static int rocker_probe(struct pci_dev *pdev, const struct 
pci_device_id *id) goto err_register_switchdev_notifier; } + nb = &rocker_switchdev_blocking_notifier; + err = register_switchdev_blocking_notifier(nb); + if (err) { + dev_err(&pdev->dev, "Failed to register switchdev blocking notifier\n"); + goto err_register_switchdev_blocking_notifier; + } + rocker->hw.id = rocker_read64(rocker, SWITCH_ID); dev_info(&pdev->dev, "Rocker switch with id %*phN\n", @@ -2940,6 +2984,8 @@ static int rocker_probe(struct pci_dev *pdev, const struct pci_device_id *id) return 0; +err_register_switchdev_blocking_notifier: + unregister_switchdev_notifier(&rocker_switchdev_notifier); err_register_switchdev_notifier: unregister_fib_notifier(&rocker->fib_nb); err_register_fib_notifier: @@ -2971,6 +3017,10 @@ err_pci_enable_device: static void rocker_remove(struct pci_dev *pdev) { struct rocker *rocker = pci_get_drvdata(pdev); + struct notifier_block *nb; + + nb = &rocker_switchdev_blocking_notifier; + unregister_switchdev_blocking_notifier(nb); unregister_switchdev_notifier(&rocker_switchdev_notifier); unregister_fib_notifier(&rocker->fib_nb); diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index 7eeac3d6cfe8..b6a50058bb8d 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -6041,6 +6041,10 @@ static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = { { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" }, { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" }, { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" }, + /* MUM and SUC firmware share the same partition type */ + { NVRAM_PARTITION_TYPE_MUM_FIRMWARE, 0, 0, "sfc_mumfw" }, + { NVRAM_PARTITION_TYPE_EXPANSION_UEFI, 0, 0, "sfc_uefi" }, + { NVRAM_PARTITION_TYPE_STATUS, 0, 0, "sfc_status" } }; static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, @@ -6091,6 +6095,9 @@ static int efx_ef10_mtd_probe_partition(struct efx_nic *efx, part->common.mtd.flags = MTD_CAP_NORFLASH; part->common.mtd.size = size; part->common.mtd.erasesize = erase_size; + /* sfc_status is read-only */ + if (!erase_size) + part->common.mtd.flags |= MTD_NO_ERASE; return 0; } diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c index 3143588ffd77..600d7b895cf2 100644 --- a/drivers/net/ethernet/sfc/ethtool.c +++ b/drivers/net/ethernet/sfc/ethtool.c @@ -539,7 +539,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev, /* We need rx buffers and interrupts. */ already_up = (efx->net_dev->flags & IFF_UP); if (!already_up) { - rc = dev_open(efx->net_dev); + rc = dev_open(efx->net_dev, NULL); if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); diff --git a/drivers/net/ethernet/sfc/falcon/ethtool.c b/drivers/net/ethernet/sfc/falcon/ethtool.c index 1ccdb7a82e2a..72cedec945c1 100644 --- a/drivers/net/ethernet/sfc/falcon/ethtool.c +++ b/drivers/net/ethernet/sfc/falcon/ethtool.c @@ -517,7 +517,7 @@ static void ef4_ethtool_self_test(struct net_device *net_dev, /* We need rx buffers and interrupts. 
*/ already_up = (efx->net_dev->flags & IFF_UP); if (!already_up) { - rc = dev_open(efx->net_dev); + rc = dev_open(efx->net_dev, NULL); if (rc) { netif_err(efx, drv, efx->net_dev, "failed opening device.\n"); diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c index c3ad564ac4c0..22eb059086f7 100644 --- a/drivers/net/ethernet/sfc/tx.c +++ b/drivers/net/ethernet/sfc/tx.c @@ -553,13 +553,10 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb) if (!data_mapped && (efx_tx_map_data(tx_queue, skb, segments))) goto err; - /* Update BQL */ - netdev_tx_sent_queue(tx_queue->core_txq, skb_len); - efx_tx_maybe_stop_queue(tx_queue); /* Pass off to hardware */ - if (!xmit_more || netif_xmit_stopped(tx_queue->core_txq)) { + if (__netdev_tx_sent_queue(tx_queue->core_txq, skb_len, xmit_more)) { struct efx_tx_queue *txq2 = efx_tx_queue_partner(tx_queue); /* There could be packets left on the partner queue if those diff --git a/drivers/net/ethernet/smsc/Kconfig b/drivers/net/ethernet/smsc/Kconfig index 358820282ef0..79612060d0ba 100644 --- a/drivers/net/ethernet/smsc/Kconfig +++ b/drivers/net/ethernet/smsc/Kconfig @@ -27,7 +27,7 @@ config SMC9194 option if you have a DELL laptop with the docking station, or another SMC9192/9194 based chipset. Say Y if you want it compiled into the kernel, and read the file - <file:Documentation/networking/smc9.txt>. + <file:Documentation/networking/device_drivers/smsc/smc9.txt>. To compile this driver as a module, choose M here. The module will be called smc9194. @@ -43,7 +43,7 @@ config SMC91X This is a driver for SMC's 91x series of Ethernet chipsets, including the SMC91C94 and the SMC91C111. Say Y if you want it compiled into the kernel, and read the file - <file:Documentation/networking/smc9.txt>. + <file:Documentation/networking/device_drivers/smsc/smc9.txt>. This driver is also available as a module ( = code which can be inserted in and removed from the running kernel whenever you want). 
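The sfc hunk above and the r8169 transmit path earlier in this section make the same conversion: instead of unconditionally updating BQL with netdev_tx_sent_queue() and then deciding separately whether to ring the hardware doorbell, they call __netdev_tx_sent_queue() (or its net_device-level wrapper __netdev_sent_queue()), which queues the bytes against BQL and returns true only when the doorbell actually has to be written, i.e. when skb->xmit_more is clear or BQL has just stopped the queue. A minimal sketch of the pattern follows; the foo_priv struct and its doorbell register are invented for illustration, and only the netdev helpers are real kernel API:

#include <linux/netdevice.h>
#include <linux/io.h>

/* Hypothetical per-device state, for illustration only. */
struct foo_priv {
	void __iomem *doorbell;	/* TX kick register, device-specific */
};

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct foo_priv *fp = netdev_priv(dev);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
	unsigned int len = skb->len;

	/* ... DMA-map the skb and post its descriptors here ... */

	/* Queue the bytes against BQL; the helper returns true when the
	 * doorbell must be written now, either because this skb ends a
	 * batch (xmit_more is false) or because BQL just stopped the
	 * queue. While it returns false, further skbs can be posted
	 * without touching the expensive MMIO register.
	 */
	if (__netdev_tx_sent_queue(txq, len, skb->xmit_more))
		writel(1, fp->doorbell);

	return NETDEV_TX_OK;
}

Note that the r8169 hunk pairs this with an smp_mb() and a re-check of ring occupancy after stopping the queue, so a completion running on another CPU cannot miss the wake-up; the netsec driver below implements the same producer/consumer handshake in netsec_check_stop_tx().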
diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index d9d0d03e4ce7..05a0948ad929 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -234,6 +234,9 @@ #define DESC_NUM 256 +#define NETSEC_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) +#define NETSEC_RX_BUF_SZ 1536 + #define DESC_SZ sizeof(struct netsec_de) #define NETSEC_F_NETSEC_VER_MAJOR_NUM(x) ((x) & 0xffff0000) @@ -254,7 +257,6 @@ struct netsec_desc_ring { dma_addr_t desc_dma; struct netsec_desc *desc; void *vaddr; - u16 pkt_cnt; u16 head, tail; }; @@ -571,34 +573,10 @@ static const struct ethtool_ops netsec_ethtool_ops = { /************* NETDEV_OPS FOLLOW *************/ -static struct sk_buff *netsec_alloc_skb(struct netsec_priv *priv, - struct netsec_desc *desc) -{ - struct sk_buff *skb; - - if (device_get_dma_attr(priv->dev) == DEV_DMA_COHERENT) { - skb = netdev_alloc_skb_ip_align(priv->ndev, desc->len); - } else { - desc->len = L1_CACHE_ALIGN(desc->len); - skb = netdev_alloc_skb(priv->ndev, desc->len); - } - if (!skb) - return NULL; - - desc->addr = skb->data; - desc->dma_addr = dma_map_single(priv->dev, desc->addr, desc->len, - DMA_FROM_DEVICE); - if (dma_mapping_error(priv->dev, desc->dma_addr)) { - dev_kfree_skb_any(skb); - return NULL; - } - return skb; -} static void netsec_set_rx_de(struct netsec_priv *priv, struct netsec_desc_ring *dring, u16 idx, - const struct netsec_desc *desc, - struct sk_buff *skb) + const struct netsec_desc *desc) { struct netsec_de *de = dring->vaddr + DESC_SZ * idx; u32 attr = (1 << NETSEC_RX_PKT_OWN_FIELD) | @@ -617,88 +595,28 @@ static void netsec_set_rx_de(struct netsec_priv *priv, dring->desc[idx].dma_addr = desc->dma_addr; dring->desc[idx].addr = desc->addr; dring->desc[idx].len = desc->len; - dring->desc[idx].skb = skb; -} - -static struct sk_buff *netsec_get_rx_de(struct netsec_priv *priv, - struct netsec_desc_ring *dring, - u16 idx, - struct netsec_rx_pkt_info *rxpi, - struct netsec_desc *desc, u16 *len) -{ - struct netsec_de de = {}; - - memcpy(&de, dring->vaddr + DESC_SZ * idx, DESC_SZ); - - *len = de.buf_len_info >> 16; - - rxpi->err_flag = (de.attr >> NETSEC_RX_PKT_ER_FIELD) & 1; - rxpi->rx_cksum_result = (de.attr >> NETSEC_RX_PKT_CO_FIELD) & 3; - rxpi->err_code = (de.attr >> NETSEC_RX_PKT_ERR_FIELD) & - NETSEC_RX_PKT_ERR_MASK; - *desc = dring->desc[idx]; - return desc->skb; -} - -static struct sk_buff *netsec_get_rx_pkt_data(struct netsec_priv *priv, - struct netsec_rx_pkt_info *rxpi, - struct netsec_desc *desc, - u16 *len) -{ - struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; - struct sk_buff *tmp_skb, *skb = NULL; - struct netsec_desc td; - int tail; - - *rxpi = (struct netsec_rx_pkt_info){}; - - td.len = priv->ndev->mtu + 22; - - tmp_skb = netsec_alloc_skb(priv, &td); - - tail = dring->tail; - - if (!tmp_skb) { - netsec_set_rx_de(priv, dring, tail, &dring->desc[tail], - dring->desc[tail].skb); - } else { - skb = netsec_get_rx_de(priv, dring, tail, rxpi, desc, len); - netsec_set_rx_de(priv, dring, tail, &td, tmp_skb); - } - - /* move tail ahead */ - dring->tail = (dring->tail + 1) % DESC_NUM; - - return skb; } -static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget) +static bool netsec_clean_tx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; unsigned int pkts, bytes; - - dring->pkt_cnt += netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); - - if (dring->pkt_cnt < budget) - budget = dring->pkt_cnt; + struct netsec_de 
*entry; + int tail = dring->tail; + int cnt = 0; pkts = 0; bytes = 0; + entry = dring->vaddr + DESC_SZ * tail; - while (pkts < budget) { + while (!(entry->attr & (1U << NETSEC_TX_SHIFT_OWN_FIELD)) && + cnt < DESC_NUM) { struct netsec_desc *desc; - struct netsec_de *entry; - int tail, eop; - - tail = dring->tail; - - /* move tail ahead */ - dring->tail = (tail + 1) % DESC_NUM; + int eop; desc = &dring->desc[tail]; - entry = dring->vaddr + DESC_SZ * tail; - eop = (entry->attr >> NETSEC_TX_LAST) & 1; + dma_rmb(); dma_unmap_single(priv->dev, desc->dma_addr, desc->len, DMA_TO_DEVICE); @@ -707,33 +625,94 @@ static int netsec_clean_tx_dring(struct netsec_priv *priv, int budget) bytes += desc->skb->len; dev_kfree_skb(desc->skb); } + /* clean up so netsec_uninit_pkt_dring() won't free the skb + * again + */ *desc = (struct netsec_desc){}; + + /* entry->attr is not going to be accessed by the NIC until + * netsec_set_tx_de() is called. No need for a dma_wmb() here + */ + entry->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; + /* move tail ahead */ + dring->tail = (tail + 1) % DESC_NUM; + + tail = dring->tail; + entry = dring->vaddr + DESC_SZ * tail; + cnt++; } - dring->pkt_cnt -= budget; - priv->ndev->stats.tx_packets += budget; + if (!cnt) + return false; + + /* reading the register clears the irq */ + netsec_read(priv, NETSEC_REG_NRM_TX_DONE_PKTCNT); + + priv->ndev->stats.tx_packets += cnt; priv->ndev->stats.tx_bytes += bytes; - netdev_completed_queue(priv->ndev, budget, bytes); + netdev_completed_queue(priv->ndev, cnt, bytes); - return budget; + return true; } -static int netsec_process_tx(struct netsec_priv *priv, int budget) +static void netsec_process_tx(struct netsec_priv *priv) { struct net_device *ndev = priv->ndev; - int new, done = 0; + bool cleaned; - do { - new = netsec_clean_tx_dring(priv, budget); - done += new; - budget -= new; - } while (new); + cleaned = netsec_clean_tx_dring(priv); - if (done && netif_queue_stopped(ndev)) + if (cleaned && netif_queue_stopped(ndev)) { + /* Make sure we update the value, anyone stopping the queue + * after this will read the proper consumer idx + */ + smp_wmb(); netif_wake_queue(ndev); + } +} - return done; +static void *netsec_alloc_rx_data(struct netsec_priv *priv, + dma_addr_t *dma_handle, u16 *desc_len) +{ + size_t total_len = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + size_t payload_len = NETSEC_RX_BUF_SZ; + dma_addr_t mapping; + void *buf; + + total_len += SKB_DATA_ALIGN(payload_len + NETSEC_SKB_PAD); + + buf = napi_alloc_frag(total_len); + if (!buf) + return NULL; + + mapping = dma_map_single(priv->dev, buf + NETSEC_SKB_PAD, payload_len, + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(priv->dev, mapping))) + goto err_out; + + *dma_handle = mapping; + *desc_len = payload_len; + + return buf; + +err_out: + skb_free_frag(buf); + return NULL; +} + +static void netsec_rx_fill(struct netsec_priv *priv, u16 from, u16 num) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; + u16 idx = from; + + while (num) { + netsec_set_rx_de(priv, dring, idx, &dring->desc[idx]); + idx++; + if (idx >= DESC_NUM) + idx = 0; + num--; + } } static int netsec_process_rx(struct netsec_priv *priv, int budget) @@ -741,14 +720,17 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; struct net_device *ndev = priv->ndev; struct netsec_rx_pkt_info rx_info; - int done = 0; - struct netsec_desc desc; struct sk_buff *skb; - u16 len; + int done = 0; while (done < budget) { 
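		/* One RX descriptor is consumed and refilled in place per
		 * pass, so the NAPI budget directly bounds the packets
		 * handled in a single poll.
		 */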
u16 idx = dring->tail; struct netsec_de *de = dring->vaddr + (DESC_SZ * idx); + struct netsec_desc *desc = &dring->desc[idx]; + u16 pkt_len, desc_len; + dma_addr_t dma_handle; + void *buf_addr; + u32 truesize; if (de->attr & (1U << NETSEC_RX_PKT_OWN_FIELD)) { /* reading the register clears the irq */ @@ -762,18 +744,59 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) */ dma_rmb(); done++; - skb = netsec_get_rx_pkt_data(priv, &rx_info, &desc, &len); - if (unlikely(!skb) || rx_info.err_flag) { + + pkt_len = de->buf_len_info >> 16; + rx_info.err_code = (de->attr >> NETSEC_RX_PKT_ERR_FIELD) & + NETSEC_RX_PKT_ERR_MASK; + rx_info.err_flag = (de->attr >> NETSEC_RX_PKT_ER_FIELD) & 1; + if (rx_info.err_flag) { netif_err(priv, drv, priv->ndev, - "%s: rx fail err(%d)\n", - __func__, rx_info.err_code); + "%s: rx fail err(%d)\n", __func__, + rx_info.err_code); ndev->stats.rx_dropped++; + dring->tail = (dring->tail + 1) % DESC_NUM; + /* reuse buffer page frag */ + netsec_rx_fill(priv, idx, 1); continue; } + rx_info.rx_cksum_result = + (de->attr >> NETSEC_RX_PKT_CO_FIELD) & 3; - dma_unmap_single(priv->dev, desc.dma_addr, desc.len, - DMA_FROM_DEVICE); - skb_put(skb, len); + /* allocate a fresh buffer and map it to the hardware. + * This will eventually replace the old buffer in the hardware + */ + buf_addr = netsec_alloc_rx_data(priv, &dma_handle, &desc_len); + if (unlikely(!buf_addr)) + break; + + dma_sync_single_for_cpu(priv->dev, desc->dma_addr, pkt_len, + DMA_FROM_DEVICE); + prefetch(desc->addr); + + truesize = SKB_DATA_ALIGN(desc->len + NETSEC_SKB_PAD) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + skb = build_skb(desc->addr, truesize); + if (unlikely(!skb)) { + /* free the newly allocated buffer, we are not going to + * use it + */ + dma_unmap_single(priv->dev, dma_handle, desc_len, + DMA_FROM_DEVICE); + skb_free_frag(buf_addr); + netif_err(priv, drv, priv->ndev, + "rx failed to build skb\n"); + break; + } + dma_unmap_single_attrs(priv->dev, desc->dma_addr, desc->len, + DMA_FROM_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); + + /* Update the descriptor with the new buffer we allocated */ + desc->len = desc_len; + desc->dma_addr = dma_handle; + desc->addr = buf_addr; + + skb_reserve(skb, NETSEC_SKB_PAD); + skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, priv->ndev); if (priv->rx_cksum_offload_flag && @@ -782,8 +805,11 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) if (napi_gro_receive(&priv->napi, skb) != GRO_DROP) { ndev->stats.rx_packets++; - ndev->stats.rx_bytes += len; + ndev->stats.rx_bytes += pkt_len; } + + netsec_rx_fill(priv, idx, 1); + dring->tail = (dring->tail + 1) % DESC_NUM; } return done; @@ -792,24 +818,17 @@ static int netsec_process_rx(struct netsec_priv *priv, int budget) static int netsec_napi_poll(struct napi_struct *napi, int budget) { struct netsec_priv *priv; - int tx, rx, done, todo; + int rx, done, todo; priv = container_of(napi, struct netsec_priv, napi); + netsec_process_tx(priv); + todo = budget; do { - if (!todo) - break; - - tx = netsec_process_tx(priv, todo); - todo -= tx; - - if (!todo) - break; - rx = netsec_process_rx(priv, todo); todo -= rx; - } while (rx || tx); + } while (rx); done = budget - todo; @@ -861,6 +880,41 @@ static void netsec_set_tx_de(struct netsec_priv *priv, dring->head = (dring->head + 1) % DESC_NUM; } +static int netsec_desc_used(struct netsec_desc_ring *dring) +{ + int used; + + if (dring->head >= dring->tail) + used = dring->head - dring->tail; + else + used = dring->head + DESC_NUM - dring->tail; 
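+		/* head is behind tail because the producer index has
+		 * wrapped, so the occupied region spans the end of the
+		 * ring; adding DESC_NUM unwraps the subtraction.
+		 */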
+ + return used; +} + +static int netsec_check_stop_tx(struct netsec_priv *priv, int used) +{ + struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_TX]; + + /* keep tail from touching the queue */ + if (DESC_NUM - used < 2) { + netif_stop_queue(priv->ndev); + + /* Make sure we read the updated value in case + * descriptors got freed + */ + smp_rmb(); + + used = netsec_desc_used(dring); + if (DESC_NUM - used < 2) + return NETDEV_TX_BUSY; + + netif_wake_queue(priv->ndev); + } + + return 0; +} + static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, struct net_device *ndev) { @@ -871,16 +925,10 @@ static netdev_tx_t netsec_netdev_start_xmit(struct sk_buff *skb, u16 tso_seg_len = 0; int filled; - /* differentiate between full/emtpy ring */ - if (dring->head >= dring->tail) - filled = dring->head - dring->tail; - else - filled = dring->head + DESC_NUM - dring->tail; - - if (DESC_NUM - filled < 2) { /* if less than 2 available */ - netif_err(priv, drv, priv->ndev, "%s: TxQFull!\n", __func__); - netif_stop_queue(priv->ndev); - dma_wmb(); + filled = netsec_desc_used(dring); + if (netsec_check_stop_tx(priv, filled)) { + net_warn_ratelimited("%s %s Tx queue full\n", + dev_name(priv->dev), ndev->name); return NETDEV_TX_BUSY; } @@ -946,7 +994,10 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) dma_unmap_single(priv->dev, desc->dma_addr, desc->len, id == NETSEC_RING_RX ? DMA_FROM_DEVICE : DMA_TO_DEVICE); - dev_kfree_skb(desc->skb); + if (id == NETSEC_RING_RX) + skb_free_frag(desc->addr); + else if (id == NETSEC_RING_TX) + dev_kfree_skb(desc->skb); } memset(dring->desc, 0, sizeof(struct netsec_desc) * DESC_NUM); @@ -954,7 +1005,6 @@ static void netsec_uninit_pkt_dring(struct netsec_priv *priv, int id) dring->head = 0; dring->tail = 0; - dring->pkt_cnt = 0; if (id == NETSEC_RING_TX) netdev_reset_queue(priv->ndev); @@ -977,47 +1027,64 @@ static void netsec_free_dring(struct netsec_priv *priv, int id) static int netsec_alloc_dring(struct netsec_priv *priv, enum ring_id id) { struct netsec_desc_ring *dring = &priv->desc_ring[id]; - int ret = 0; + int i; dring->vaddr = dma_zalloc_coherent(priv->dev, DESC_SZ * DESC_NUM, &dring->desc_dma, GFP_KERNEL); - if (!dring->vaddr) { - ret = -ENOMEM; + if (!dring->vaddr) goto err; - } dring->desc = kcalloc(DESC_NUM, sizeof(*dring->desc), GFP_KERNEL); - if (!dring->desc) { - ret = -ENOMEM; + if (!dring->desc) goto err; + + if (id == NETSEC_RING_TX) { + for (i = 0; i < DESC_NUM; i++) { + struct netsec_de *de; + + de = dring->vaddr + (DESC_SZ * i); + /* de->attr is not going to be accessed by the NIC + * until netsec_set_tx_de() is called. 
+ * No need for a dma_wmb() here + */ + de->attr = 1U << NETSEC_TX_SHIFT_OWN_FIELD; + } } return 0; err: netsec_free_dring(priv, id); - return ret; + return -ENOMEM; } static int netsec_setup_rx_dring(struct netsec_priv *priv) { struct netsec_desc_ring *dring = &priv->desc_ring[NETSEC_RING_RX]; - struct netsec_desc desc; - struct sk_buff *skb; - int n; + int i; - desc.len = priv->ndev->mtu + 22; + for (i = 0; i < DESC_NUM; i++) { + struct netsec_desc *desc = &dring->desc[i]; + dma_addr_t dma_handle; + void *buf; + u16 len; - for (n = 0; n < DESC_NUM; n++) { - skb = netsec_alloc_skb(priv, &desc); - if (!skb) { + buf = netsec_alloc_rx_data(priv, &dma_handle, &len); + if (!buf) { netsec_uninit_pkt_dring(priv, NETSEC_RING_RX); - return -ENOMEM; + goto err_out; } - netsec_set_rx_de(priv, dring, n, &desc, skb); + desc->dma_addr = dma_handle; + desc->addr = buf; + desc->len = len; } + netsec_rx_fill(priv, 0, DESC_NUM); + return 0; + +err_out: + return -ENOMEM; } static int netsec_netdev_load_ucode_region(struct netsec_priv *priv, u32 reg, @@ -1377,6 +1444,8 @@ static int netsec_netdev_init(struct net_device *ndev) int ret; u16 data; + BUILD_BUG_ON_NOT_POWER_OF_2(DESC_NUM); + ret = netsec_alloc_dring(priv, NETSEC_RING_TX); if (ret) return ret; diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 7c7cd9d94bcc..bb6d5fb73035 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -262,6 +262,7 @@ struct ave_private { struct regmap *regmap; unsigned int pinmode_mask; unsigned int pinmode_val; + u32 wolopts; /* stats */ struct ave_stats stats_rx; @@ -1119,7 +1120,7 @@ static void ave_phy_adjust_link(struct net_device *ndev) if (phydev->asym_pause) rmt_adv |= LPA_PAUSE_ASYM; - lcl_adv = ethtool_adv_to_lcl_adv_t(phydev->advertising); + lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising); cap = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv); if (cap & FLOW_CTRL_TX) txcr |= AVE_TXCR_FLOCTR; @@ -1210,9 +1211,13 @@ static int ave_init(struct net_device *ndev) priv->phydev = phydev; - phy_ethtool_get_wol(phydev, &wol); + ave_ethtool_get_wol(ndev, &wol); device_set_wakeup_capable(&ndev->dev, !!wol.supported); + /* set wol initial state disabled */ + wol.wolopts = 0; + ave_ethtool_set_wol(ndev, &wol); + if (!phy_interface_is_rgmii(phydev)) phy_set_max_speed(phydev, SPEED_100); @@ -1737,6 +1742,58 @@ static int ave_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM_SLEEP +static int ave_suspend(struct device *dev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct net_device *ndev = dev_get_drvdata(dev); + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + + if (netif_running(ndev)) { + ret = ave_stop(ndev); + netif_device_detach(ndev); + } + + ave_ethtool_get_wol(ndev, &wol); + priv->wolopts = wol.wolopts; + + return ret; +} + +static int ave_resume(struct device *dev) +{ + struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL }; + struct net_device *ndev = dev_get_drvdata(dev); + struct ave_private *priv = netdev_priv(ndev); + int ret = 0; + + ave_global_reset(ndev); + + ave_ethtool_get_wol(ndev, &wol); + wol.wolopts = priv->wolopts; + ave_ethtool_set_wol(ndev, &wol); + + if (ndev->phydev) { + ret = phy_resume(ndev->phydev); + if (ret) + return ret; + } + + if (netif_running(ndev)) { + ret = ave_open(ndev); + netif_device_attach(ndev); + } + + return ret; +} + +static SIMPLE_DEV_PM_OPS(ave_pm_ops, ave_suspend, ave_resume); +#define AVE_PM_OPS (&ave_pm_ops) +#else +#define 
AVE_PM_OPS NULL +#endif + static int ave_pro4_get_pinmode(struct ave_private *priv, phy_interface_t phy_mode, u32 arg) { @@ -1911,6 +1968,7 @@ static struct platform_driver ave_driver = { .remove = ave_remove, .driver = { .name = "ave", + .pm = AVE_PM_OPS, .of_match_table = of_ave_match, }, }; diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index 324049eebb9b..6209cc1fb305 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -75,6 +75,14 @@ config DWMAC_LPC18XX ---help--- Support for NXP LPC18xx/43xx DWMAC Ethernet. +config DWMAC_MEDIATEK + tristate "MediaTek MT27xx GMAC support" + depends on OF && (ARCH_MEDIATEK || COMPILE_TEST) + help + Support for MediaTek GMAC Ethernet controller. + + This selects the MT2712 SoC support for the stmmac driver. + config DWMAC_MESON tristate "Amlogic Meson dwmac support" default ARCH_MESON diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index 99967a80a8c8..bf09701d2623 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -13,6 +13,7 @@ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o obj-$(CONFIG_DWMAC_ANARION) += dwmac-anarion.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o +obj-$(CONFIG_DWMAC_MEDIATEK) += dwmac-mediatek.o obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_OXNAS) += dwmac-oxnas.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c new file mode 100644 index 000000000000..bf2562995fc8 --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-mediatek.c @@ -0,0 +1,390 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2018 MediaTek Inc. 
+ */ +#include <linux/bitfield.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_net.h> +#include <linux/regmap.h> +#include <linux/stmmac.h> + +#include "stmmac.h" +#include "stmmac_platform.h" + +/* Peri Configuration register for mt2712 */ +#define PERI_ETH_PHY_INTF_SEL 0x418 +#define PHY_INTF_MII 0 +#define PHY_INTF_RGMII 1 +#define PHY_INTF_RMII 4 +#define RMII_CLK_SRC_RXC BIT(4) +#define RMII_CLK_SRC_INTERNAL BIT(5) + +#define PERI_ETH_DLY 0x428 +#define ETH_DLY_GTXC_INV BIT(6) +#define ETH_DLY_GTXC_ENABLE BIT(5) +#define ETH_DLY_GTXC_STAGES GENMASK(4, 0) +#define ETH_DLY_TXC_INV BIT(20) +#define ETH_DLY_TXC_ENABLE BIT(19) +#define ETH_DLY_TXC_STAGES GENMASK(18, 14) +#define ETH_DLY_RXC_INV BIT(13) +#define ETH_DLY_RXC_ENABLE BIT(12) +#define ETH_DLY_RXC_STAGES GENMASK(11, 7) + +#define PERI_ETH_DLY_FINE 0x800 +#define ETH_RMII_DLY_TX_INV BIT(2) +#define ETH_FINE_DLY_GTXC BIT(1) +#define ETH_FINE_DLY_RXC BIT(0) + +struct mac_delay_struct { + u32 tx_delay; + u32 rx_delay; + bool tx_inv; + bool rx_inv; +}; + +struct mediatek_dwmac_plat_data { + const struct mediatek_dwmac_variant *variant; + struct mac_delay_struct mac_delay; + struct clk_bulk_data *clks; + struct device_node *np; + struct regmap *peri_regmap; + struct device *dev; + int phy_mode; + bool rmii_rxc; +}; + +struct mediatek_dwmac_variant { + int (*dwmac_set_phy_interface)(struct mediatek_dwmac_plat_data *plat); + int (*dwmac_set_delay)(struct mediatek_dwmac_plat_data *plat); + + /* clock ids to be requested */ + const char * const *clk_list; + int num_clks; + + u32 dma_bit_mask; + u32 rx_delay_max; + u32 tx_delay_max; +}; + +/* list of clocks required for mac */ +static const char * const mt2712_dwmac_clk_l[] = { + "axi", "apb", "mac_main", "ptp_ref" +}; + +static int mt2712_set_interface(struct mediatek_dwmac_plat_data *plat) +{ + int rmii_rxc = plat->rmii_rxc ? 
RMII_CLK_SRC_RXC : 0; + u32 intf_val = 0; + + /* select phy interface in top control domain */ + switch (plat->phy_mode) { + case PHY_INTERFACE_MODE_MII: + intf_val |= PHY_INTF_MII; + break; + case PHY_INTERFACE_MODE_RMII: + intf_val |= (PHY_INTF_RMII | rmii_rxc); + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + intf_val |= PHY_INTF_RGMII; + break; + default: + dev_err(plat->dev, "phy interface not supported\n"); + return -EINVAL; + } + + regmap_write(plat->peri_regmap, PERI_ETH_PHY_INTF_SEL, intf_val); + + return 0; +} + +static void mt2712_delay_ps2stage(struct mediatek_dwmac_plat_data *plat) +{ + struct mac_delay_struct *mac_delay = &plat->mac_delay; + + switch (plat->phy_mode) { + case PHY_INTERFACE_MODE_MII: + case PHY_INTERFACE_MODE_RMII: + /* 550ps per stage for MII/RMII */ + mac_delay->tx_delay /= 550; + mac_delay->rx_delay /= 550; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + /* 170ps per stage for RGMII */ + mac_delay->tx_delay /= 170; + mac_delay->rx_delay /= 170; + break; + default: + dev_err(plat->dev, "phy interface not supported\n"); + break; + } +} + +static int mt2712_set_delay(struct mediatek_dwmac_plat_data *plat) +{ + struct mac_delay_struct *mac_delay = &plat->mac_delay; + u32 delay_val = 0, fine_val = 0; + + mt2712_delay_ps2stage(plat); + + switch (plat->phy_mode) { + case PHY_INTERFACE_MODE_MII: + delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->tx_delay); + delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->tx_delay); + delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->tx_inv); + + delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv); + break; + case PHY_INTERFACE_MODE_RMII: + /* the rmii reference clock is from external phy, + * and the property "rmii_rxc" indicates which pin(TXC/RXC) + * the reference clk is connected to. The reference clock is a + * received signal, so rx_delay/rx_inv are used to indicate + * the reference clock timing adjustment + */ + if (plat->rmii_rxc) { + /* the rmii reference clock from outside is connected + * to RXC pin, the reference clock will be adjusted + * by RXC delay macro circuit. + */ + delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv); + } else { + /* the rmii reference clock from outside is connected + * to TXC pin, the reference clock will be adjusted + * by TXC delay macro circuit. 
+ delay_val |= FIELD_PREP(ETH_DLY_TXC_ENABLE, !!mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_TXC_STAGES, mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_TXC_INV, mac_delay->rx_inv); + } + /* tx_inv will invert the tx clock inside the mac relative + * to the reference clock from the external phy; this bit + * lives in the same register as the fine-tune bits + */ + if (mac_delay->tx_inv) + fine_val = ETH_RMII_DLY_TX_INV; + break; + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_TXID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_ID: + fine_val = ETH_FINE_DLY_GTXC | ETH_FINE_DLY_RXC; + + delay_val |= FIELD_PREP(ETH_DLY_GTXC_ENABLE, !!mac_delay->tx_delay); + delay_val |= FIELD_PREP(ETH_DLY_GTXC_STAGES, mac_delay->tx_delay); + delay_val |= FIELD_PREP(ETH_DLY_GTXC_INV, mac_delay->tx_inv); + + delay_val |= FIELD_PREP(ETH_DLY_RXC_ENABLE, !!mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_RXC_STAGES, mac_delay->rx_delay); + delay_val |= FIELD_PREP(ETH_DLY_RXC_INV, mac_delay->rx_inv); + break; + default: + dev_err(plat->dev, "phy interface not supported\n"); + return -EINVAL; + } + regmap_write(plat->peri_regmap, PERI_ETH_DLY, delay_val); + regmap_write(plat->peri_regmap, PERI_ETH_DLY_FINE, fine_val); + + return 0; +} + +static const struct mediatek_dwmac_variant mt2712_gmac_variant = { + .dwmac_set_phy_interface = mt2712_set_interface, + .dwmac_set_delay = mt2712_set_delay, + .clk_list = mt2712_dwmac_clk_l, + .num_clks = ARRAY_SIZE(mt2712_dwmac_clk_l), + .dma_bit_mask = 33, + .rx_delay_max = 17600, + .tx_delay_max = 17600, +}; + +static int mediatek_dwmac_config_dt(struct mediatek_dwmac_plat_data *plat) +{ + struct mac_delay_struct *mac_delay = &plat->mac_delay; + u32 tx_delay_ps, rx_delay_ps; + + plat->peri_regmap = syscon_regmap_lookup_by_phandle(plat->np, "mediatek,pericfg"); + if (IS_ERR(plat->peri_regmap)) { + dev_err(plat->dev, "Failed to get pericfg syscon\n"); + return PTR_ERR(plat->peri_regmap); + } + + plat->phy_mode = of_get_phy_mode(plat->np); + if (plat->phy_mode < 0) { + dev_err(plat->dev, "phy-mode not found\n"); + return -EINVAL; + } + + if (!of_property_read_u32(plat->np, "mediatek,tx-delay-ps", &tx_delay_ps)) { + if (tx_delay_ps < plat->variant->tx_delay_max) { + mac_delay->tx_delay = tx_delay_ps; + } else { + dev_err(plat->dev, "Invalid TX clock delay: %dps\n", tx_delay_ps); + return -EINVAL; + } + } + + if (!of_property_read_u32(plat->np, "mediatek,rx-delay-ps", &rx_delay_ps)) { + if (rx_delay_ps < plat->variant->rx_delay_max) { + mac_delay->rx_delay = rx_delay_ps; + } else { + dev_err(plat->dev, "Invalid RX clock delay: %dps\n", rx_delay_ps); + return -EINVAL; + } + } + + mac_delay->tx_inv = of_property_read_bool(plat->np, "mediatek,txc-inverse"); + mac_delay->rx_inv = of_property_read_bool(plat->np, "mediatek,rxc-inverse"); + plat->rmii_rxc = of_property_read_bool(plat->np, "mediatek,rmii-rxc"); + + return 0; +} + +static int mediatek_dwmac_clk_init(struct mediatek_dwmac_plat_data *plat) +{ + const struct mediatek_dwmac_variant *variant = plat->variant; + int i, num = variant->num_clks; + + plat->clks = devm_kcalloc(plat->dev, num, sizeof(*plat->clks), GFP_KERNEL); + if (!plat->clks) + return -ENOMEM; + + for (i = 0; i < num; i++) + plat->clks[i].id = variant->clk_list[i]; + + return devm_clk_bulk_get(plat->dev, num, plat->clks); +} + +static int mediatek_dwmac_init(struct platform_device *pdev, void *priv) +{ + struct mediatek_dwmac_plat_data *plat = priv; + const struct mediatek_dwmac_variant *variant =
plat->variant; + int ret; + + ret = dma_set_mask_and_coherent(plat->dev, DMA_BIT_MASK(variant->dma_bit_mask)); + if (ret) { + dev_err(plat->dev, "No suitable DMA available, err = %d\n", ret); + return ret; + } + + ret = variant->dwmac_set_phy_interface(plat); + if (ret) { + dev_err(plat->dev, "failed to set phy interface, err = %d\n", ret); + return ret; + } + + ret = variant->dwmac_set_delay(plat); + if (ret) { + dev_err(plat->dev, "failed to set delay value, err = %d\n", ret); + return ret; + } + + ret = clk_bulk_prepare_enable(variant->num_clks, plat->clks); + if (ret) { + dev_err(plat->dev, "failed to enable clks, err = %d\n", ret); + return ret; + } + + return 0; +} + +static void mediatek_dwmac_exit(struct platform_device *pdev, void *priv) +{ + struct mediatek_dwmac_plat_data *plat = priv; + const struct mediatek_dwmac_variant *variant = plat->variant; + + clk_bulk_disable_unprepare(variant->num_clks, plat->clks); +} + +static int mediatek_dwmac_probe(struct platform_device *pdev) +{ + struct mediatek_dwmac_plat_data *priv_plat; + struct plat_stmmacenet_data *plat_dat; + struct stmmac_resources stmmac_res; + int ret; + + priv_plat = devm_kzalloc(&pdev->dev, sizeof(*priv_plat), GFP_KERNEL); + if (!priv_plat) + return -ENOMEM; + + priv_plat->variant = of_device_get_match_data(&pdev->dev); + if (!priv_plat->variant) { + dev_err(&pdev->dev, "Missing dwmac-mediatek variant\n"); + return -EINVAL; + } + + priv_plat->dev = &pdev->dev; + priv_plat->np = pdev->dev.of_node; + + ret = mediatek_dwmac_config_dt(priv_plat); + if (ret) + return ret; + + ret = mediatek_dwmac_clk_init(priv_plat); + if (ret) + return ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + plat_dat->interface = priv_plat->phy_mode; + /* clk_csr_i = 250-300MHz & MDC = clk_csr_i/124 */ + plat_dat->clk_csr = 5; + plat_dat->has_gmac4 = 1; + plat_dat->has_gmac = 0; + plat_dat->pmt = 0; + plat_dat->maxmtu = ETH_DATA_LEN; + plat_dat->bsp_priv = priv_plat; + plat_dat->init = mediatek_dwmac_init; + plat_dat->exit = mediatek_dwmac_exit; + mediatek_dwmac_init(pdev, priv_plat); + + ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); + if (ret) { + stmmac_remove_config_dt(pdev, plat_dat); + return ret; + } + + return 0; +} + +static const struct of_device_id mediatek_dwmac_match[] = { + { .compatible = "mediatek,mt2712-gmac", + .data = &mt2712_gmac_variant }, + { } +}; + +MODULE_DEVICE_TABLE(of, mediatek_dwmac_match); + +static struct platform_driver mediatek_dwmac_driver = { + .probe = mediatek_dwmac_probe, + .remove = stmmac_pltfr_remove, + .driver = { + .name = "dwmac-mediatek", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = mediatek_dwmac_match, + }, +}; +module_platform_driver(mediatek_dwmac_driver); + +MODULE_AUTHOR("Biao Huang <biao.huang@mediatek.com>"); +MODULE_DESCRIPTION("MediaTek DWMAC specific glue layer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 5710864fa809..d1f61c25d82b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -458,8 +458,10 @@ stmmac_get_pauseparam(struct net_device *netdev, if (!adv_lp.pause) return; } else { - if (!(netdev->phydev->supported & SUPPORTED_Pause) || - !(netdev->phydev->supported & SUPPORTED_Asym_Pause)) + if 
(!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + netdev->phydev->supported) || + !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + netdev->phydev->supported)) return; } @@ -487,8 +489,10 @@ stmmac_set_pauseparam(struct net_device *netdev, if (!adv_lp.pause) return -EOPNOTSUPP; } else { - if (!(phy->supported & SUPPORTED_Pause) || - !(phy->supported & SUPPORTED_Asym_Pause)) + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phy->supported) || + !linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phy->supported)) return -EOPNOTSUPP; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index c4a35e932f05..0e0a0789c2ed 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -3881,7 +3881,7 @@ static void sysfs_display_ring(void *head, int size, int extend_desc, } } -static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) +static int stmmac_rings_status_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); @@ -3926,23 +3926,9 @@ static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v) return 0; } +DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status); -static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file) -{ - return single_open(file, stmmac_sysfs_ring_read, inode->i_private); -} - -/* Debugfs files, should appear in /sys/kernel/debug/stmmaceth/eth0 */ - -static const struct file_operations stmmac_rings_status_fops = { - .owner = THIS_MODULE, - .open = stmmac_sysfs_ring_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) +static int stmmac_dma_cap_show(struct seq_file *seq, void *v) { struct net_device *dev = seq->private; struct stmmac_priv *priv = netdev_priv(dev); @@ -4005,19 +3991,7 @@ static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v) return 0; } - -static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file) -{ - return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private); -} - -static const struct file_operations stmmac_dma_cap_fops = { - .owner = THIS_MODULE, - .open = stmmac_sysfs_dma_cap_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap); static int stmmac_init_fs(struct net_device *dev) { @@ -4101,7 +4075,7 @@ static void stmmac_reset_subtask(struct stmmac_priv *priv) set_bit(STMMAC_DOWN, &priv->state); dev_close(priv->dev); - dev_open(priv->dev); + dev_open(priv->dev, NULL); clear_bit(STMMAC_DOWN, &priv->state); clear_bit(STMMAC_RESETING, &priv->state); rtnl_unlock(); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 863fd602fd33..ff641cf30a4e 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -2691,7 +2691,7 @@ static int happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) sbus_dp = op->dev.parent->of_node; /* We can match PCI devices too, do not accept those here.
*/ - if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi")) + if (!of_node_name_eq(sbus_dp, "sbus") && !of_node_name_eq(sbus_dp, "sbi")) return err; if (is_qfe) { diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig index f932923f7d56..bb126be1eb72 100644 --- a/drivers/net/ethernet/ti/Kconfig +++ b/drivers/net/ethernet/ti/Kconfig @@ -121,7 +121,8 @@ config TLAN Devices currently supported by this driver are Compaq Netelligent, Compaq NetFlex and Olicom cards. Please read the file - <file:Documentation/networking/tlan.txt> for more details. + <file:Documentation/networking/device_drivers/ti/tlan.txt> + for more details. To compile this driver as a module, choose M here. The module will be called tlan. diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c index 9b8a30bf939b..810dfc7de1f9 100644 --- a/drivers/net/ethernet/ti/cpmac.c +++ b/drivers/net/ethernet/ti/cpmac.c @@ -991,7 +991,6 @@ static int cpmac_open(struct net_device *dev) cpmac_hw_start(dev); napi_enable(&priv->napi); - dev->phydev->state = PHY_CHANGELINK; phy_start(dev->phydev); return 0; diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 500f7ed8c58c..0e8f61a29479 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -283,7 +283,7 @@ struct cpsw_ss_regs { #define CTRL_V2_TS_BITS \ (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ - TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN) + TS_TTL_NONZERO | TS_ANNEX_D_EN | TS_LTYPE1_EN | VLAN_LTYPE1_EN) #define CTRL_V2_ALL_TS_MASK (CTRL_V2_TS_BITS | TS_TX_EN | TS_RX_EN) #define CTRL_V2_TX_TS_BITS (CTRL_V2_TS_BITS | TS_TX_EN) @@ -293,7 +293,7 @@ struct cpsw_ss_regs { #define CTRL_V3_TS_BITS \ (TS_107 | TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 |\ TS_TTL_NONZERO | TS_ANNEX_F_EN | TS_ANNEX_D_EN |\ - TS_LTYPE1_EN) + TS_LTYPE1_EN | VLAN_LTYPE1_EN) #define CTRL_V3_ALL_TS_MASK (CTRL_V3_TS_BITS | TS_TX_EN | TS_RX_EN) #define CTRL_V3_TX_TS_BITS (CTRL_V3_TS_BITS | TS_TX_EN) @@ -466,6 +466,8 @@ struct cpsw_priv { bool mqprio_hw; int fifo_bw[CPSW_TC_NUM]; int shp_cfg_speed; + int tx_ts_enabled; + int rx_ts_enabled; u32 emac_port; struct cpsw_common *cpsw; }; @@ -565,26 +567,14 @@ static const struct cpsw_stats cpsw_gstrings_ch_stats[] = { (func)(slave++, ##arg); \ } while (0) +static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev, + __be16 proto, u16 vid); + static inline int cpsw_get_slave_port(u32 slave_num) { return slave_num + 1; } -static void cpsw_add_mcast(struct cpsw_priv *priv, const u8 *addr) -{ - struct cpsw_common *cpsw = priv->cpsw; - - if (cpsw->data.dual_emac) { - struct cpsw_slave *slave = cpsw->slaves + priv->emac_port; - - cpsw_ale_add_mcast(cpsw->ale, addr, ALE_PORT_HOST, - ALE_VLAN, slave->port_vlan, 0); - return; - } - - cpsw_ale_add_mcast(cpsw->ale, addr, ALE_ALL_PORTS, 0, 0, 0); -} - static void cpsw_set_promiscious(struct net_device *ndev, bool enable) { struct cpsw_common *cpsw = ndev_to_cpsw(ndev); @@ -640,7 +630,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS, -1); - __dev_mc_unsync(ndev, NULL); + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, NULL); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -661,29 +651,148 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) } } -static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr) +struct addr_sync_ctx { 
+ struct net_device *ndev; + const u8 *addr; /* address to be synced */ + int consumed; /* number of address instances */ + int flush; /* flush flag */ +}; + +/** + * cpsw_set_mc - add a multicast entry to the ALE table if it is not + * present yet, or delete it if it is + * @ndev: device to sync + * @addr: address to be added or deleted + * @vid: vlan id; if vid < 0, set/unset the address on the real device + * @add: add the address if the flag is set, remove it otherwise + */ +static int cpsw_set_mc(struct net_device *ndev, const u8 *addr, + int vid, int add) { struct cpsw_priv *priv = netdev_priv(ndev); + struct cpsw_common *cpsw = priv->cpsw; + int mask, flags, ret; + + if (vid < 0) { + if (cpsw->data.dual_emac) + vid = cpsw->slaves[priv->emac_port].port_vlan; + else + vid = 0; + } + + mask = cpsw->data.dual_emac ? ALE_PORT_HOST : ALE_ALL_PORTS; + flags = vid ? ALE_VLAN : 0; + + if (add) + ret = cpsw_ale_add_mcast(cpsw->ale, addr, mask, flags, vid, 0); + else + ret = cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + + return ret; +} + +static int cpsw_update_vlan_mc(struct net_device *vdev, int vid, void *ctx) +{ + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0, ret = 0; + + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } + } + + if (found) + sync_ctx->consumed++; + + if (sync_ctx->flush) { + if (!found) + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; + } + + if (found) + ret = cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 1); + + return ret; +} + +static int cpsw_add_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + int ret; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 0; + + ret = vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num && !ret) + ret = cpsw_set_mc(ndev, addr, -1, 1); + + return ret; +} + +static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + + sync_ctx.consumed = 0; + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.flush = 1; + + vlan_for_each(ndev, cpsw_update_vlan_mc, &sync_ctx); + if (sync_ctx.consumed == num) + cpsw_set_mc(ndev, addr, -1, 0); - cpsw_add_mcast(priv, addr); return 0; } -static int cpsw_del_mc_addr(struct net_device *ndev, const u8 *addr) +static int cpsw_purge_vlan_mc(struct net_device *vdev, int vid, void *ctx) { - struct cpsw_priv *priv = netdev_priv(ndev); - struct cpsw_common *cpsw = priv->cpsw; - int vid, flags; + struct addr_sync_ctx *sync_ctx = ctx; + struct netdev_hw_addr *ha; + int found = 0; - if (cpsw->data.dual_emac) { - vid = cpsw->slaves[priv->emac_port].port_vlan; - flags = ALE_VLAN; - } else { - vid = 0; - flags = 0; + if (!vdev || !(vdev->flags & IFF_UP)) + return 0; + + /* vlan address is relevant if its sync_cnt != 0 */ + netdev_for_each_mc_addr(ha, vdev) { + if (ether_addr_equal(ha->addr, sync_ctx->addr)) { + found = ha->sync_cnt; + break; + } } - cpsw_ale_del_mcast(cpsw->ale, addr, 0, flags, vid); + if (!found) + return 0; + + sync_ctx->consumed++; + cpsw_set_mc(sync_ctx->ndev, sync_ctx->addr, vid, 0); + return 0; +} + +static int cpsw_purge_all_mc(struct net_device *ndev, const u8 *addr, int num) +{ + struct addr_sync_ctx sync_ctx; + + sync_ctx.addr = addr; + sync_ctx.ndev = ndev; + sync_ctx.consumed = 0; + + 
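For orientation, the contract assumed by the callbacks above (inferred from these call sites rather than from a header): __hw_addr_ref_sync_dev() hands each multicast address to its sync/unsync callback together with the address's reference count, and cpsw first offers the address to every VLAN upper via vlan_for_each(), touching the real device only for references that no VLAN consumed. A toy callback pair with the same shape:

    /* hypothetical example; num = how many listeners reference addr */
    static int toy_sync(struct net_device *dev, const u8 *addr, int num)
    {
            return 0;    /* install addr in the hardware filter */
    }

    static int toy_unsync(struct net_device *dev, const u8 *addr, int num)
    {
            return 0;    /* remove addr from the hardware filter */
    }

    /* wired up as: __hw_addr_ref_sync_dev(&dev->mc, dev, toy_sync, toy_unsync); */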
vlan_for_each(ndev, cpsw_purge_vlan_mc, &sync_ctx); + if (sync_ctx.consumed < num) + cpsw_set_mc(ndev, addr, -1, 0); + return 0; } @@ -704,7 +813,9 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) /* Restore allmulti on vlans if necessary */ cpsw_ale_set_allmulti(cpsw->ale, ndev->flags & IFF_ALLMULTI); - __dev_mc_sync(ndev, cpsw_add_mc_addr, cpsw_del_mc_addr); + /* add/remove mcast address either for real netdev or for vlan */ + __hw_addr_ref_sync_dev(&ndev->mc, ndev, cpsw_add_mc_addr, + cpsw_del_mc_addr); } static void cpsw_intr_enable(struct cpsw_common *cpsw) @@ -796,6 +907,7 @@ static void cpsw_rx_handler(void *token, int len, int status) struct net_device *ndev = skb->dev; int ret = 0, port; struct cpsw_common *cpsw = ndev_to_cpsw(ndev); + struct cpsw_priv *priv; if (cpsw->data.dual_emac) { port = CPDMA_RX_SOURCE_PORT(status); @@ -830,7 +942,9 @@ static void cpsw_rx_handler(void *token, int len, int status) skb_put(skb, len); if (status & CPDMA_RX_VLAN_ENCAP) cpsw_rx_vlan_encap(skb); - cpts_rx_timestamp(cpsw->cpts, skb); + priv = netdev_priv(ndev); + if (priv->rx_ts_enabled) + cpts_rx_timestamp(cpsw->cpts, skb); skb->protocol = eth_type_trans(skb, ndev); netif_receive_skb(skb); ndev->stats.rx_bytes += len; @@ -1845,9 +1959,23 @@ static void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv) slave_write(slave, tx_prio_map, tx_prio_rg); } +static int cpsw_restore_vlans(struct net_device *vdev, int vid, void *arg) +{ + struct cpsw_priv *priv = arg; + + if (!vdev) + return 0; + + cpsw_ndo_vlan_rx_add_vid(priv->ndev, 0, vid); + return 0; +} + /* restore resources after port reset */ static void cpsw_restore(struct cpsw_priv *priv) { + /* restore vlan configurations */ + vlan_for_each(priv->ndev, cpsw_restore_vlans, priv); + /* restore MQPRIO offload */ for_each_slave(priv, cpsw_mqprio_resume, priv); @@ -1964,7 +2092,7 @@ static int cpsw_ndo_stop(struct net_device *ndev) struct cpsw_common *cpsw = priv->cpsw; cpsw_info(priv, ifdown, "shutting down cpsw device\n"); - __dev_mc_unsync(priv->ndev, cpsw_del_mc_addr); + __hw_addr_ref_unsync_dev(&ndev->mc, ndev, cpsw_purge_all_mc); netif_tx_stop_all_queues(priv->ndev); netif_carrier_off(priv->ndev); @@ -2003,7 +2131,7 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb, } if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && - cpts_is_tx_enabled(cpts) && cpts_can_timestamp(cpts, skb)) + priv->tx_ts_enabled && cpts_can_timestamp(cpts, skb)) skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; q_idx = skb_get_queue_mapping(skb); @@ -2047,13 +2175,13 @@ fail: #if IS_ENABLED(CONFIG_TI_CPTS) -static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw) +static void cpsw_hwtstamp_v1(struct cpsw_priv *priv) { + struct cpsw_common *cpsw = priv->cpsw; struct cpsw_slave *slave = &cpsw->slaves[cpsw->data.active_slave]; u32 ts_en, seq_id; - if (!cpts_is_tx_enabled(cpsw->cpts) && - !cpts_is_rx_enabled(cpsw->cpts)) { + if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) { slave_write(slave, 0, CPSW1_TS_CTL); return; } @@ -2061,10 +2189,10 @@ static void cpsw_hwtstamp_v1(struct cpsw_common *cpsw) seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588; ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS; - if (cpts_is_tx_enabled(cpsw->cpts)) + if (priv->tx_ts_enabled) ts_en |= CPSW_V1_TS_TX_EN; - if (cpts_is_rx_enabled(cpsw->cpts)) + if (priv->rx_ts_enabled) ts_en |= CPSW_V1_TS_RX_EN; slave_write(slave, ts_en, CPSW1_TS_CTL); @@ -2084,20 +2212,20 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) case CPSW_VERSION_2: ctrl &= 
~CTRL_V2_ALL_TS_MASK; - if (cpts_is_tx_enabled(cpsw->cpts)) + if (priv->tx_ts_enabled) ctrl |= CTRL_V2_TX_TS_BITS; - if (cpts_is_rx_enabled(cpsw->cpts)) + if (priv->rx_ts_enabled) ctrl |= CTRL_V2_RX_TS_BITS; break; case CPSW_VERSION_3: default: ctrl &= ~CTRL_V3_ALL_TS_MASK; - if (cpts_is_tx_enabled(cpsw->cpts)) + if (priv->tx_ts_enabled) ctrl |= CTRL_V3_TX_TS_BITS; - if (cpts_is_rx_enabled(cpsw->cpts)) + if (priv->rx_ts_enabled) ctrl |= CTRL_V3_RX_TS_BITS; break; } @@ -2107,6 +2235,7 @@ static void cpsw_hwtstamp_v2(struct cpsw_priv *priv) slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE); slave_write(slave, ctrl, CPSW2_CONTROL); writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype); + writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype); } static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) @@ -2114,7 +2243,6 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) struct cpsw_priv *priv = netdev_priv(dev); struct hwtstamp_config cfg; struct cpsw_common *cpsw = priv->cpsw; - struct cpts *cpts = cpsw->cpts; if (cpsw->version != CPSW_VERSION_1 && cpsw->version != CPSW_VERSION_2 && @@ -2133,7 +2261,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: - cpts_rx_enable(cpts, 0); + priv->rx_ts_enabled = 0; break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_NTP_ALL: @@ -2141,7 +2269,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT); + priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: @@ -2153,18 +2281,18 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT); + priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: return -ERANGE; } - cpts_tx_enable(cpts, cfg.tx_type == HWTSTAMP_TX_ON); + priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON; switch (cpsw->version) { case CPSW_VERSION_1: - cpsw_hwtstamp_v1(cpsw); + cpsw_hwtstamp_v1(priv); break; case CPSW_VERSION_2: case CPSW_VERSION_3: @@ -2180,7 +2308,7 @@ static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr) static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) { struct cpsw_common *cpsw = ndev_to_cpsw(dev); - struct cpts *cpts = cpsw->cpts; + struct cpsw_priv *priv = netdev_priv(dev); struct hwtstamp_config cfg; if (cpsw->version != CPSW_VERSION_1 && @@ -2189,10 +2317,8 @@ static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr) return -EOPNOTSUPP; cfg.flags = 0; - cfg.tx_type = cpts_is_tx_enabled(cpts) ? - HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = (cpts_is_rx_enabled(cpts) ? - cpts->rx_enable : HWTSTAMP_FILTER_NONE); + cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg.rx_filter = priv->rx_ts_enabled; return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? 
-EFAULT : 0; } @@ -2415,6 +2541,7 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev, HOST_PORT_NUM, ALE_VLAN, vid); ret |= cpsw_ale_del_mcast(cpsw->ale, priv->ndev->broadcast, 0, ALE_VLAN, vid); + ret |= cpsw_ale_flush_multicast(cpsw->ale, 0, vid); err: pm_runtime_put(cpsw->dev); return ret; @@ -3144,7 +3271,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data, const __be32 *parp; /* This is no slave child node, continue */ - if (strcmp(slave_node->name, "slave")) + if (!of_node_name_eq(slave_node, "slave")) continue; slave_data->phy_node = of_parse_phandle(slave_node, @@ -3240,7 +3367,7 @@ static void cpsw_remove_dt(struct platform_device *pdev) for_each_available_child_of_node(node, slave_node) { struct cpsw_slave_data *slave_data = &data->slave_data[i]; - if (strcmp(slave_node->name, "slave")) + if (!of_node_name_eq(slave_node, "slave")) continue; if (of_phy_is_fixed_link(slave_node)) diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c index b96b93c686bf..054f78295d1d 100644 --- a/drivers/net/ethernet/ti/cpts.c +++ b/drivers/net/ethernet/ti/cpts.c @@ -86,6 +86,25 @@ static int cpts_purge_events(struct cpts *cpts) return removed ? 0 : -1; } +static void cpts_purge_txq(struct cpts *cpts) +{ + struct cpts_skb_cb_data *skb_cb; + struct sk_buff *skb, *tmp; + int removed = 0; + + skb_queue_walk_safe(&cpts->txq, skb, tmp) { + skb_cb = (struct cpts_skb_cb_data *)skb->cb; + if (time_after(jiffies, skb_cb->tmo)) { + __skb_unlink(skb, &cpts->txq); + dev_consume_skb_any(skb); + ++removed; + } + } + + if (removed) + dev_dbg(cpts->dev, "txq cleaned up %d\n", removed); +} + static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) { struct sk_buff *skb, *tmp; @@ -119,9 +138,7 @@ static bool cpts_match_tx_ts(struct cpts *cpts, struct cpts_event *event) if (time_after(jiffies, skb_cb->tmo)) { /* timeout any expired skbs over 1s */ - dev_dbg(cpts->dev, - "expiring tx timestamp mtype %u seqid %04x\n", - mtype, seqid); + dev_dbg(cpts->dev, "expiring tx timestamp from txq\n"); __skb_unlink(skb, &cpts->txq); dev_consume_skb_any(skb); } @@ -294,8 +311,11 @@ static long cpts_overflow_check(struct ptp_clock_info *ptp) spin_lock_irqsave(&cpts->lock, flags); ts = ns_to_timespec64(timecounter_read(&cpts->tc)); - if (!skb_queue_empty(&cpts->txq)) - delay = CPTS_SKB_TX_WORK_TIMEOUT; + if (!skb_queue_empty(&cpts->txq)) { + cpts_purge_txq(cpts); + if (!skb_queue_empty(&cpts->txq)) + delay = CPTS_SKB_TX_WORK_TIMEOUT; + } spin_unlock_irqrestore(&cpts->lock, flags); pr_debug("cpts overflow check at %lld.%09ld\n", @@ -410,8 +430,6 @@ void cpts_rx_timestamp(struct cpts *cpts, struct sk_buff *skb) u64 ns; struct skb_shared_hwtstamps *ssh; - if (!cpts->rx_enable) - return; ns = cpts_find_ts(cpts, skb, CPTS_EV_RX); if (!ns) return; diff --git a/drivers/net/ethernet/ti/cpts.h b/drivers/net/ethernet/ti/cpts.h index 73d73faf0f38..d2c7decd59b6 100644 --- a/drivers/net/ethernet/ti/cpts.h +++ b/drivers/net/ethernet/ti/cpts.h @@ -136,26 +136,6 @@ struct cpts *cpts_create(struct device *dev, void __iomem *regs, struct device_node *node); void cpts_release(struct cpts *cpts); -static inline void cpts_rx_enable(struct cpts *cpts, int enable) -{ - cpts->rx_enable = enable; -} - -static inline bool cpts_is_rx_enabled(struct cpts *cpts) -{ - return !!cpts->rx_enable; -} - -static inline void cpts_tx_enable(struct cpts *cpts, int enable) -{ - cpts->tx_enable = enable; -} - -static inline bool cpts_is_tx_enabled(struct cpts *cpts) -{ - return !!cpts->tx_enable; -} - static 
inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb) { unsigned int class = ptp_classify_raw(skb); @@ -197,24 +177,6 @@ static inline void cpts_unregister(struct cpts *cpts) { } -static inline void cpts_rx_enable(struct cpts *cpts, int enable) -{ -} - -static inline bool cpts_is_rx_enabled(struct cpts *cpts) -{ - return false; -} - -static inline void cpts_tx_enable(struct cpts *cpts, int enable) -{ -} - -static inline bool cpts_is_tx_enabled(struct cpts *cpts) -{ - return false; -} - static inline bool cpts_can_timestamp(struct cpts *cpts, struct sk_buff *skb) { return false; diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index 9153db120352..840820402cd0 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -1912,11 +1912,15 @@ static int davinci_emac_probe(struct platform_device *pdev) ether_addr_copy(ndev->dev_addr, priv->mac_addr); if (!is_valid_ether_addr(priv->mac_addr)) { - /* Use random MAC if none passed */ - eth_hw_addr_random(ndev); - memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); - dev_warn(&pdev->dev, "using random MAC addr: %pM\n", - priv->mac_addr); + /* Try nvmem if MAC wasn't passed over pdata or DT. */ + rc = nvmem_get_mac_address(&pdev->dev, priv->mac_addr); + if (rc) { + /* Use random MAC if still none obtained. */ + eth_hw_addr_random(ndev); + memcpy(priv->mac_addr, ndev->dev_addr, ndev->addr_len); + dev_warn(&pdev->dev, "using random MAC addr: %pM\n", + priv->mac_addr); + } } ndev->netdev_ops = &emac_netdev_ops; diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 0397ccb6597e..5174d318901e 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -763,6 +763,8 @@ struct gbe_priv { int cpts_registered; struct cpts *cpts; + int rx_ts_enabled; + int tx_ts_enabled; }; struct gbe_intf { @@ -2564,7 +2566,7 @@ static int gbe_txtstamp_mark_pkt(struct gbe_intf *gbe_intf, struct gbe_priv *gbe_dev = gbe_intf->gbe_dev; if (!(skb_shinfo(p_info->skb)->tx_flags & SKBTX_HW_TSTAMP) || - !cpts_is_tx_enabled(gbe_dev->cpts)) + !gbe_dev->tx_ts_enabled) return 0; /* If phy has the txtstamp api, assume it will do it. @@ -2598,7 +2600,9 @@ static int gbe_rxtstamp(struct gbe_intf *gbe_intf, struct netcp_packet *p_info) return 0; } - cpts_rx_timestamp(gbe_dev->cpts, p_info->skb); + if (gbe_dev->rx_ts_enabled) + cpts_rx_timestamp(gbe_dev->cpts, p_info->skb); + p_info->rxtstamp_complete = true; return 0; @@ -2614,10 +2618,8 @@ static int gbe_hwtstamp_get(struct gbe_intf *gbe_intf, struct ifreq *ifr) return -EOPNOTSUPP; cfg.flags = 0; - cfg.tx_type = cpts_is_tx_enabled(cpts) ? - HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; - cfg.rx_filter = (cpts_is_rx_enabled(cpts) ? - cpts->rx_enable : HWTSTAMP_FILTER_NONE); + cfg.tx_type = gbe_dev->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF; + cfg.rx_filter = gbe_dev->rx_ts_enabled; return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0; } @@ -2628,8 +2630,8 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf) struct gbe_slave *slave = gbe_intf->slave; u32 ts_en, seq_id, ctl; - if (!cpts_is_rx_enabled(gbe_dev->cpts) && - !cpts_is_tx_enabled(gbe_dev->cpts)) { + if (!gbe_dev->rx_ts_enabled && + !gbe_dev->tx_ts_enabled) { writel(0, GBE_REG_ADDR(slave, port_regs, ts_ctl)); return; } @@ -2641,10 +2643,10 @@ static void gbe_hwtstamp(struct gbe_intf *gbe_intf) (slave->ts_ctl.uni ? 
TS_UNI_EN : slave->ts_ctl.maddr_map << TS_CTL_MADDR_SHIFT); - if (cpts_is_tx_enabled(gbe_dev->cpts)) + if (gbe_dev->tx_ts_enabled) ts_en |= (TS_TX_ANX_ALL_EN | TS_TX_VLAN_LT1_EN); - if (cpts_is_rx_enabled(gbe_dev->cpts)) + if (gbe_dev->rx_ts_enabled) ts_en |= (TS_RX_ANX_ALL_EN | TS_RX_VLAN_LT1_EN); writel(ts_en, GBE_REG_ADDR(slave, port_regs, ts_ctl)); @@ -2670,10 +2672,10 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) switch (cfg.tx_type) { case HWTSTAMP_TX_OFF: - cpts_tx_enable(cpts, 0); + gbe_dev->tx_ts_enabled = 0; break; case HWTSTAMP_TX_ON: - cpts_tx_enable(cpts, 1); + gbe_dev->tx_ts_enabled = 1; break; default: return -ERANGE; @@ -2681,12 +2683,12 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) switch (cfg.rx_filter) { case HWTSTAMP_FILTER_NONE: - cpts_rx_enable(cpts, 0); + gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_NONE; break; case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: - cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V1_L4_EVENT); + gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; cfg.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT; break; case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: @@ -2698,7 +2700,7 @@ static int gbe_hwtstamp_set(struct gbe_intf *gbe_intf, struct ifreq *ifr) case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: - cpts_rx_enable(cpts, HWTSTAMP_FILTER_PTP_V2_EVENT); + gbe_dev->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT; cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; break; default: @@ -3621,7 +3623,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, return -EINVAL; } - if (!strcmp(node->name, "gbe")) { + if (of_node_name_eq(node, "gbe")) { ret = get_gbe_resource_version(gbe_dev, node); if (ret) return ret; @@ -3635,7 +3637,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, else ret = -ENODEV; - } else if (!strcmp(node->name, "xgbe")) { + } else if (of_node_name_eq(node, "xgbe")) { ret = set_xgbe_ethss10_priv(gbe_dev, node); if (ret) return ret; diff --git a/drivers/net/ethernet/ti/tlan.c b/drivers/net/ethernet/ti/tlan.c index 93d142867c2a..b4ab1a5f6cd0 100644 --- a/drivers/net/ethernet/ti/tlan.c +++ b/drivers/net/ethernet/ti/tlan.c @@ -69,7 +69,9 @@ MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>"); MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters"); MODULE_LICENSE("GPL"); -/* Turn on debugging. See Documentation/networking/tlan.txt for details */ +/* Turn on debugging. 
+ * See Documentation/networking/device_drivers/ti/tlan.txt for details + */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "ThunderLAN debug mask"); diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c index 6a71c2c0f17d..c50a9772f4af 100644 --- a/drivers/net/ethernet/toshiba/tc35815.c +++ b/drivers/net/ethernet/toshiba/tc35815.c @@ -607,9 +607,9 @@ static void tc_handle_link_change(struct net_device *dev) static int tc_mii_probe(struct net_device *dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; struct tc35815_local *lp = netdev_priv(dev); struct phy_device *phydev; - u32 dropmask; phydev = phy_find_first(lp->mii_bus); if (!phydev) { @@ -630,17 +630,22 @@ static int tc_mii_probe(struct net_device *dev) /* mask with MAC supported features */ phy_set_max_speed(phydev, SPEED_100); - dropmask = 0; - if (options.speed == 10) - dropmask |= SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; - else if (options.speed == 100) - dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full; - if (options.duplex == 1) - dropmask |= SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Full; - else if (options.duplex == 2) - dropmask |= SUPPORTED_10baseT_Half | SUPPORTED_100baseT_Half; - phydev->supported &= ~dropmask; - phydev->advertising = phydev->supported; + if (options.speed == 10) { + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + } else if (options.speed == 100) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask); + } + if (options.duplex == 1) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + } else if (options.duplex == 2) { + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + } + linkmode_andnot(phydev->supported, phydev->supported, mask); + linkmode_copy(phydev->advertising, phydev->supported); lp->link = 0; lp->speed = 0; diff --git a/drivers/net/fjes/fjes_debugfs.c b/drivers/net/fjes/fjes_debugfs.c index 30052ebd52bf..7fed88ea27a5 100644 --- a/drivers/net/fjes/fjes_debugfs.c +++ b/drivers/net/fjes/fjes_debugfs.c @@ -62,19 +62,7 @@ static int fjes_dbg_status_show(struct seq_file *m, void *v) return 0; } - -static int fjes_dbg_status_open(struct inode *inode, struct file *file) -{ - return single_open(file, fjes_dbg_status_show, inode->i_private); -} - -static const struct file_operations fjes_dbg_status_fops = { - .owner = THIS_MODULE, - .open = fjes_dbg_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(fjes_dbg_status); void fjes_dbg_adapter_init(struct fjes_adapter *adapter) { diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index a0cd1c41cf5f..58bbba8582b0 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -70,6 +70,7 @@ struct geneve_dev { bool collect_md; bool use_udp6_rx_checksums; bool ttl_inherit; + enum ifla_geneve_df df; }; struct geneve_sock { @@ -387,6 +388,59 @@ drop: return 0; } +/* Callback from net/ipv{4,6}/udp.c to check that we have a tunnel for errors */ +static int geneve_udp_encap_err_lookup(struct sock *sk, struct sk_buff *skb) +{ + struct genevehdr *geneveh; + struct geneve_sock *gs; + u8 zero_vni[3] = { 0 }; + u8 *vni = zero_vni; + + if (skb->len < GENEVE_BASE_HLEN) + return -EINVAL; + + geneveh = geneve_hdr(skb); + if
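The fjes hunk above repeats a cleanup applied throughout this series (bonding, stmmac, and at86rf230 and netdevsim below): DEFINE_SHOW_ATTRIBUTE(name) generates the single_open() plumbing each driver used to spell out, and it expects the show routine to be called name_show, hence the renames that accompany it. Judging by the deleted lines, the macro expands to roughly this, with "name" spliced in by token pasting:

    static int name_open(struct inode *inode, struct file *file)
    {
            return single_open(file, name_show, inode->i_private);
    }

    static const struct file_operations name_fops = {
            .owner   = THIS_MODULE,
            .open    = name_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };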
(geneveh->ver != GENEVE_VER) + return -EINVAL; + + if (geneveh->proto_type != htons(ETH_P_TEB)) + return -EINVAL; + + gs = rcu_dereference_sk_user_data(sk); + if (!gs) + return -ENOENT; + + if (geneve_get_sk_family(gs) == AF_INET) { + struct iphdr *iph = ip_hdr(skb); + __be32 addr4 = 0; + + if (!gs->collect_md) { + vni = geneve_hdr(skb)->vni; + addr4 = iph->daddr; + } + + return geneve_lookup(gs, addr4, vni) ? 0 : -ENOENT; + } + +#if IS_ENABLED(CONFIG_IPV6) + if (geneve_get_sk_family(gs) == AF_INET6) { + struct ipv6hdr *ip6h = ipv6_hdr(skb); + struct in6_addr addr6; + + memset(&addr6, 0, sizeof(struct in6_addr)); + + if (!gs->collect_md) { + vni = geneve_hdr(skb)->vni; + addr6 = ip6h->daddr; + } + + return geneve6_lookup(gs, addr6, vni) ? 0 : -ENOENT; + } +#endif + + return -EPFNOSUPPORT; +} + static struct socket *geneve_create_sock(struct net *net, bool ipv6, __be16 port, bool ipv6_rx_csum) { @@ -544,6 +598,7 @@ static struct geneve_sock *geneve_socket_create(struct net *net, __be16 port, tunnel_cfg.gro_receive = geneve_gro_receive; tunnel_cfg.gro_complete = geneve_gro_complete; tunnel_cfg.encap_rcv = geneve_udp_encap_recv; + tunnel_cfg.encap_err_lookup = geneve_udp_encap_err_lookup; tunnel_cfg.encap_destroy = NULL; setup_udp_tunnel_sock(net, sock, &tunnel_cfg); list_add(&gs->list, &gn->sock_list); @@ -823,8 +878,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct rtable *rt; struct flowi4 fl4; __u8 tos, ttl; + __be16 df = 0; __be16 sport; - __be16 df; int err; rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); @@ -838,6 +893,8 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, if (geneve->collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; + + df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? htons(IP_DF) : 0; } else { tos = ip_tunnel_ecn_encap(fl4.flowi4_tos, ip_hdr(skb), skb); if (geneve->ttl_inherit) @@ -845,8 +902,22 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, else ttl = key->ttl; ttl = ttl ? : ip4_dst_hoplimit(&rt->dst); + + if (geneve->df == GENEVE_DF_SET) { + df = htons(IP_DF); + } else if (geneve->df == GENEVE_DF_INHERIT) { + struct ethhdr *eth = eth_hdr(skb); + + if (ntohs(eth->h_proto) == ETH_P_IPV6) { + df = htons(IP_DF); + } else if (ntohs(eth->h_proto) == ETH_P_IP) { + struct iphdr *iph = ip_hdr(skb); + + if (iph->frag_off & htons(IP_DF)) + df = htons(IP_DF); + } + } } - df = key->tun_flags & TUNNEL_DONT_FRAGMENT ? 
htons(IP_DF) : 0; err = geneve_build_skb(&rt->dst, skb, info, xnet, sizeof(struct iphdr)); if (unlikely(err)) @@ -1093,6 +1164,7 @@ static const struct nla_policy geneve_policy[IFLA_GENEVE_MAX + 1] = { [IFLA_GENEVE_UDP_ZERO_CSUM6_TX] = { .type = NLA_U8 }, [IFLA_GENEVE_UDP_ZERO_CSUM6_RX] = { .type = NLA_U8 }, [IFLA_GENEVE_TTL_INHERIT] = { .type = NLA_U8 }, + [IFLA_GENEVE_DF] = { .type = NLA_U8 }, }; static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], @@ -1128,6 +1200,16 @@ static int geneve_validate(struct nlattr *tb[], struct nlattr *data[], } } + if (data[IFLA_GENEVE_DF]) { + enum ifla_geneve_df df = nla_get_u8(data[IFLA_GENEVE_DF]); + + if (df < 0 || df > GENEVE_DF_MAX) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_GENEVE_DF], + "Invalid DF attribute"); + return -EINVAL; + } + } + return 0; } @@ -1173,7 +1255,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, struct netlink_ext_ack *extack, const struct ip_tunnel_info *info, bool metadata, bool ipv6_rx_csum, - bool ttl_inherit) + bool ttl_inherit, enum ifla_geneve_df df) { struct geneve_net *gn = net_generic(net, geneve_net_id); struct geneve_dev *t, *geneve = netdev_priv(dev); @@ -1223,6 +1305,7 @@ static int geneve_configure(struct net *net, struct net_device *dev, geneve->collect_md = metadata; geneve->use_udp6_rx_checksums = ipv6_rx_csum; geneve->ttl_inherit = ttl_inherit; + geneve->df = df; err = register_netdevice(dev); if (err) @@ -1242,7 +1325,7 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack, struct ip_tunnel_info *info, bool *metadata, bool *use_udp6_rx_checksums, bool *ttl_inherit, - bool changelink) + enum ifla_geneve_df *df, bool changelink) { int attrtype; @@ -1330,6 +1413,9 @@ static int geneve_nl2info(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_GENEVE_TOS]) info->key.tos = nla_get_u8(data[IFLA_GENEVE_TOS]); + if (data[IFLA_GENEVE_DF]) + *df = nla_get_u8(data[IFLA_GENEVE_DF]); + if (data[IFLA_GENEVE_LABEL]) { info->key.label = nla_get_be32(data[IFLA_GENEVE_LABEL]) & IPV6_FLOWLABEL_MASK; @@ -1448,6 +1534,7 @@ static int geneve_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { + enum ifla_geneve_df df = GENEVE_DF_UNSET; bool use_udp6_rx_checksums = false; struct ip_tunnel_info info; bool ttl_inherit = false; @@ -1456,12 +1543,12 @@ static int geneve_newlink(struct net *net, struct net_device *dev, init_tnl_info(&info, GENEVE_UDP_PORT); err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, &ttl_inherit, false); + &use_udp6_rx_checksums, &ttl_inherit, &df, false); if (err) return err; err = geneve_configure(net, dev, extack, &info, metadata, - use_udp6_rx_checksums, ttl_inherit); + use_udp6_rx_checksums, ttl_inherit, df); if (err) return err; @@ -1524,6 +1611,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], struct ip_tunnel_info info; bool metadata; bool use_udp6_rx_checksums; + enum ifla_geneve_df df; bool ttl_inherit; int err; @@ -1539,7 +1627,7 @@ static int geneve_changelink(struct net_device *dev, struct nlattr *tb[], use_udp6_rx_checksums = geneve->use_udp6_rx_checksums; ttl_inherit = geneve->ttl_inherit; err = geneve_nl2info(tb, data, extack, &info, &metadata, - &use_udp6_rx_checksums, &ttl_inherit, true); + &use_udp6_rx_checksums, &ttl_inherit, &df, true); if (err) return err; @@ -1572,6 +1660,7 @@ static size_t geneve_get_size(const struct net_device *dev) 
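Summing up the IFLA_GENEVE_DF plumbing above: the attribute selects how the outer IPv4 Don't Fragment bit is chosen on non-collect_md tunnels (collect_md keeps deriving it from TUNNEL_DONT_FRAGMENT). A condensed restatement of the geneve_xmit_skb() logic; is_ipv6 and inner_iph are stand-ins for the eth_hdr()/ip_hdr() peeks in the patch:

    switch (geneve->df) {
    case GENEVE_DF_SET:         /* always set DF on the outer header */
            df = htons(IP_DF);
            break;
    case GENEVE_DF_INHERIT:     /* IPv4: copy the inner DF bit; IPv6 payloads get DF */
            df = (is_ipv6 || (inner_iph->frag_off & htons(IP_DF))) ?
                 htons(IP_DF) : 0;
            break;
    default:                    /* GENEVE_DF_UNSET: leave DF clear */
            df = 0;
    }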
nla_total_size(sizeof(struct in6_addr)) + /* IFLA_GENEVE_REMOTE{6} */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_TOS */ + nla_total_size(sizeof(__u8)) + /* IFLA_GENEVE_DF */ nla_total_size(sizeof(__be32)) + /* IFLA_GENEVE_LABEL */ nla_total_size(sizeof(__be16)) + /* IFLA_GENEVE_PORT */ nla_total_size(0) + /* IFLA_GENEVE_COLLECT_METADATA */ @@ -1620,6 +1709,9 @@ static int geneve_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_be32(skb, IFLA_GENEVE_LABEL, info->key.label)) goto nla_put_failure; + if (nla_put_u8(skb, IFLA_GENEVE_DF, geneve->df)) + goto nla_put_failure; + if (nla_put_be16(skb, IFLA_GENEVE_PORT, info->key.tp_dst)) goto nla_put_failure; @@ -1666,12 +1758,13 @@ struct net_device *geneve_dev_create_fb(struct net *net, const char *name, memset(tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, - &geneve_link_ops, tb); + &geneve_link_ops, tb, NULL); if (IS_ERR(dev)) return dev; init_tnl_info(&info, dst_port); - err = geneve_configure(net, dev, NULL, &info, true, true, false); + err = geneve_configure(net, dev, NULL, &info, + true, true, false, GENEVE_DF_UNSET); if (err) { free_netdev(dev); return ERR_PTR(err); diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c index 17e6dcd2eb42..28c749980359 100644 --- a/drivers/net/hamradio/6pack.c +++ b/drivers/net/hamradio/6pack.c @@ -120,7 +120,7 @@ struct sixpack { struct timer_list tx_t; struct timer_list resync_t; refcount_t refcnt; - struct semaphore dead_sem; + struct completion dead; spinlock_t lock; }; @@ -389,7 +389,7 @@ static struct sixpack *sp_get(struct tty_struct *tty) static void sp_put(struct sixpack *sp) { if (refcount_dec_and_test(&sp->refcnt)) - up(&sp->dead_sem); + complete(&sp->dead); } /* @@ -576,7 +576,7 @@ static int sixpack_open(struct tty_struct *tty) spin_lock_init(&sp->lock); refcount_set(&sp->refcnt, 1); - sema_init(&sp->dead_sem, 0); + init_completion(&sp->dead); /* !!! length of the buffers. MTU is IP MTU, not PACLEN! */ @@ -670,10 +670,10 @@ static void sixpack_close(struct tty_struct *tty) * we have to wait for all existing users to finish. */ if (!refcount_dec_and_test(&sp->refcnt)) - down(&sp->dead_sem); + wait_for_completion(&sp->dead); /* We must stop the queue to avoid potentially scribbling - * on the free buffers. The sp->dead_sem is not sufficient + * on the free buffers. The sp->dead completion is not sufficient * to protect us from sp->xbuff access. */ netif_stop_queue(sp->dev); diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c index 802233d41b25..4938cf4c184c 100644 --- a/drivers/net/hamradio/mkiss.c +++ b/drivers/net/hamradio/mkiss.c @@ -81,7 +81,7 @@ struct mkiss { #define CRC_MODE_SMACK_TEST 4 atomic_t refcnt; - struct semaphore dead_sem; + struct completion dead; }; /*---------------------------------------------------------------------------*/ @@ -687,7 +687,7 @@ static struct mkiss *mkiss_get(struct tty_struct *tty) static void mkiss_put(struct mkiss *ax) { if (atomic_dec_and_test(&ax->refcnt)) - up(&ax->dead_sem); + complete(&ax->dead); } static int crc_force = 0; /* Can be overridden with insmod */ @@ -715,7 +715,7 @@ static int mkiss_open(struct tty_struct *tty) spin_lock_init(&ax->buflock); atomic_set(&ax->refcnt, 1); - sema_init(&ax->dead_sem, 0); + init_completion(&ax->dead); ax->tty = tty; tty->disc_data = ax; @@ -795,7 +795,7 @@ static void mkiss_close(struct tty_struct *tty) * we have to wait for all existing users to finish. 
*/ if (!atomic_dec_and_test(&ax->refcnt)) - down(&ax->dead_sem); + wait_for_completion(&ax->dead); /* * Halt the transmit queue so that a new transmit cannot scribble * on our buffers diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index cf36e7ff3191..91ed15ea5883 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -137,7 +137,7 @@ static int netvsc_open(struct net_device *net) * slave as up. If open fails, then slave will be * still be offline (and not used). */ - ret = dev_open(vf_netdev); + ret = dev_open(vf_netdev, NULL); if (ret) netdev_warn(net, "unable to open slave: %s: %d\n", @@ -605,9 +605,9 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net) IEEE_8021Q_INFO); vlan->value = 0; - vlan->vlanid = skb->vlan_tci & VLAN_VID_MASK; - vlan->pri = (skb->vlan_tci & VLAN_PRIO_MASK) >> - VLAN_PRIO_SHIFT; + vlan->vlanid = skb_vlan_tag_get_id(skb); + vlan->cfi = skb_vlan_tag_get_cfi(skb); + vlan->pri = skb_vlan_tag_get_prio(skb); } if (skb_is_gso(skb)) { @@ -781,7 +781,8 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net, } if (vlan) { - u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT); + u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT) | + (vlan->cfi ? VLAN_CFI_MASK : 0); __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci); @@ -1246,7 +1247,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) return -ENODEV; if (vf_netdev) { - err = dev_set_mac_address(vf_netdev, addr); + err = dev_set_mac_address(vf_netdev, addr, NULL); if (err) return err; } @@ -1257,7 +1258,7 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p) } else if (vf_netdev) { /* rollback change on VF */ memcpy(addr->sa_data, ndev->dev_addr, ETH_ALEN); - dev_set_mac_address(vf_netdev, addr); + dev_set_mac_address(vf_netdev, addr, NULL); } return err; @@ -1992,7 +1993,7 @@ static void __netvsc_vf_setup(struct net_device *ndev, "unable to change mtu to %u\n", ndev->mtu); /* set multicast etc flags on VF */ - dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE); + dev_change_flags(vf_netdev, ndev->flags | IFF_SLAVE, NULL); /* sync address list from ndev to VF */ netif_addr_lock_bh(ndev); @@ -2001,7 +2002,7 @@ static void __netvsc_vf_setup(struct net_device *ndev, netif_addr_unlock_bh(ndev); if (netif_running(ndev)) { - ret = dev_open(vf_netdev); + ret = dev_open(vf_netdev, NULL); if (ret) netdev_warn(vf_netdev, "unable to open: %d\n", ret); diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 3d9e91579866..0253eb502153 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -1632,18 +1632,7 @@ static int at86rf230_stats_show(struct seq_file *file, void *offset) seq_printf(file, "INVALID:\t\t%8llu\n", lp->trac.invalid); return 0; } - -static int at86rf230_stats_open(struct inode *inode, struct file *file) -{ - return single_open(file, at86rf230_stats_show, inode->i_private); -} - -static const struct file_operations at86rf230_stats_fops = { - .open = at86rf230_stats_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(at86rf230_stats); static int at86rf230_debugfs_init(struct at86rf230_local *lp) { diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c index 4a949569ec4c..19bdde60680c 100644 --- a/drivers/net/ipvlan/ipvlan_main.c +++ b/drivers/net/ipvlan/ipvlan_main.c @@ -71,7 +71,8 @@ static void 
ipvlan_unregister_nf_hook(struct net *net) ARRAY_SIZE(ipvl_nfops)); } -static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) +static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval, + struct netlink_ext_ack *extack) { struct ipvl_dev *ipvlan; struct net_device *mdev = port->dev; @@ -84,10 +85,12 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval) flags = ipvlan->dev->flags; if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) { err = dev_change_flags(ipvlan->dev, - flags | IFF_NOARP); + flags | IFF_NOARP, + extack); } else { err = dev_change_flags(ipvlan->dev, - flags & ~IFF_NOARP); + flags & ~IFF_NOARP, + extack); } if (unlikely(err)) goto fail; @@ -116,9 +119,11 @@ fail: flags = ipvlan->dev->flags; if (port->mode == IPVLAN_MODE_L3 || port->mode == IPVLAN_MODE_L3S) - dev_change_flags(ipvlan->dev, flags | IFF_NOARP); + dev_change_flags(ipvlan->dev, flags | IFF_NOARP, + NULL); else - dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP); + dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP, + NULL); } return err; @@ -498,7 +503,7 @@ static int ipvlan_nl_changelink(struct net_device *dev, if (data[IFLA_IPVLAN_MODE]) { u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]); - err = ipvlan_set_port_mode(port, nmode); + err = ipvlan_set_port_mode(port, nmode, extack); } if (!err && data[IFLA_IPVLAN_FLAGS]) { @@ -535,7 +540,7 @@ static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[], if (data[IFLA_IPVLAN_MODE]) { u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); - if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX) + if (mode >= IPVLAN_MODE_MAX) return -EINVAL; } if (data[IFLA_IPVLAN_FLAGS]) { @@ -672,7 +677,7 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev, if (data && data[IFLA_IPVLAN_MODE]) mode = nla_get_u16(data[IFLA_IPVLAN_MODE]); - err = ipvlan_set_port_mode(port, mode); + err = ipvlan_set_port_mode(port, mode, extack); if (err) goto unlink_netdev; @@ -754,10 +759,13 @@ EXPORT_SYMBOL_GPL(ipvlan_link_register); static int ipvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { + struct netlink_ext_ack *extack = netdev_notifier_info_to_extack(ptr); + struct netdev_notifier_pre_changeaddr_info *prechaddr_info; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct ipvl_dev *ipvlan, *next; struct ipvl_port *port; LIST_HEAD(lst_kill); + int err; if (!netif_is_ipvlan_port(dev)) return NOTIFY_DONE; @@ -813,6 +821,17 @@ static int ipvlan_device_event(struct notifier_block *unused, ipvlan_adjust_mtu(ipvlan, dev); break; + case NETDEV_PRE_CHANGEADDR: + prechaddr_info = ptr; + list_for_each_entry(ipvlan, &port->ipvlans, pnode) { + err = dev_pre_changeaddr_notify(ipvlan->dev, + prechaddr_info->dev_addr, + extack); + if (err) + return notifier_from_errno(err); + } + break; + case NETDEV_CHANGEADDR: list_for_each_entry(ipvlan, &port->ipvlans, pnode) { ether_addr_copy(ipvlan->dev->dev_addr, dev->dev_addr); diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 0da3d36b283b..fc726ce4c164 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -744,7 +744,7 @@ static int macvlan_set_mac_address(struct net_device *dev, void *p) if (vlan->mode == MACVLAN_MODE_PASSTHRU) { macvlan_set_addr_change(vlan->port); - return dev_set_mac_address(vlan->lowerdev, addr); + return dev_set_mac_address(vlan->lowerdev, addr, NULL); } if (macvlan_addr_busy(vlan->port, addr->sa_data)) @@ -1213,7 +1213,7 @@ static void macvlan_port_destroy(struct net_device *dev) sa.sa_family = port->dev->type; 
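A cross-cutting theme in the hunks above and below (netvsc, ipvlan, macvlan, net_failover, and bonding earlier): the core device helpers now carry a struct netlink_ext_ack * so a failure can report a message back to userspace, and callers without netlink context simply pass NULL. The prototypes, as inferred from the call sites in this diff:

    int dev_open(struct net_device *dev, struct netlink_ext_ack *extack);
    int dev_change_flags(struct net_device *dev, unsigned int flags,
                         struct netlink_ext_ack *extack);
    int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa,
                            struct netlink_ext_ack *extack);
    int dev_pre_changeaddr_notify(struct net_device *dev, const char *addr,
                                  struct netlink_ext_ack *extack);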
memcpy(&sa.sa_data, port->perm_addr, port->dev->addr_len); - dev_set_mac_address(port->dev, &sa); + dev_set_mac_address(port->dev, &sa, NULL); } kfree(port); diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c index e964d312f4ca..ed1166adaa2f 100644 --- a/drivers/net/net_failover.c +++ b/drivers/net/net_failover.c @@ -40,14 +40,14 @@ static int net_failover_open(struct net_device *dev) primary_dev = rtnl_dereference(nfo_info->primary_dev); if (primary_dev) { - err = dev_open(primary_dev); + err = dev_open(primary_dev, NULL); if (err) goto err_primary_open; } standby_dev = rtnl_dereference(nfo_info->standby_dev); if (standby_dev) { - err = dev_open(standby_dev); + err = dev_open(standby_dev, NULL); if (err) goto err_standby_open; } @@ -517,7 +517,7 @@ static int net_failover_slave_register(struct net_device *slave_dev, dev_hold(slave_dev); if (netif_running(failover_dev)) { - err = dev_open(slave_dev); + err = dev_open(slave_dev, NULL); if (err && (err != -EBUSY)) { netdev_err(failover_dev, "Opening slave %s failed err:%d\n", slave_dev->name, err); @@ -680,7 +680,7 @@ static int net_failover_slave_name_change(struct net_device *slave_dev, /* We need to bring up the slave after the rename by udev in case * open failed with EBUSY when it was registered. */ - dev_open(slave_dev); + dev_open(slave_dev, NULL); return 0; } diff --git a/drivers/net/netdevsim/bpf.c b/drivers/net/netdevsim/bpf.c index cb3518474f0e..172b271c8bd2 100644 --- a/drivers/net/netdevsim/bpf.c +++ b/drivers/net/netdevsim/bpf.c @@ -48,7 +48,7 @@ struct nsim_bpf_bound_map { struct list_head l; }; -static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data) +static int nsim_bpf_string_show(struct seq_file *file, void *data) { const char **str = file->private; @@ -57,19 +57,7 @@ static int nsim_debugfs_bpf_string_read(struct seq_file *file, void *data) return 0; } - -static int nsim_debugfs_bpf_string_open(struct inode *inode, struct file *f) -{ - return single_open(f, nsim_debugfs_bpf_string_read, inode->i_private); -} - -static const struct file_operations nsim_bpf_string_fops = { - .owner = THIS_MODULE, - .open = nsim_debugfs_bpf_string_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek -}; +DEFINE_SHOW_ATTRIBUTE(nsim_bpf_string); static int nsim_bpf_verify_insn(struct bpf_verifier_env *env, int insn_idx, int prev_insn) @@ -91,11 +79,6 @@ static int nsim_bpf_finalize(struct bpf_verifier_env *env) return 0; } -static const struct bpf_prog_offload_ops nsim_bpf_analyzer_ops = { - .insn_hook = nsim_bpf_verify_insn, - .finalize = nsim_bpf_finalize, -}; - static bool nsim_xdp_offload_active(struct netdevsim *ns) { return ns->xdp_hw.prog; @@ -263,6 +246,24 @@ static int nsim_bpf_create_prog(struct netdevsim *ns, struct bpf_prog *prog) return 0; } +static int nsim_bpf_verifier_prep(struct bpf_prog *prog) +{ + struct netdevsim *ns = netdev_priv(prog->aux->offload->netdev); + + if (!ns->bpf_bind_accept) + return -EOPNOTSUPP; + + return nsim_bpf_create_prog(ns, prog); +} + +static int nsim_bpf_translate(struct bpf_prog *prog) +{ + struct nsim_bpf_bound_prog *state = prog->aux->offload->dev_priv; + + state->state = "xlated"; + return 0; +} + static void nsim_bpf_destroy_prog(struct bpf_prog *prog) { struct nsim_bpf_bound_prog *state; @@ -275,6 +276,14 @@ static void nsim_bpf_destroy_prog(struct bpf_prog *prog) kfree(state); } +static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = { + .insn_hook = nsim_bpf_verify_insn, + .finalize = nsim_bpf_finalize, + .prepare = 
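The netdevsim rework in progress here follows a BPF core change: the BPF_OFFLOAD_VERIFIER_PREP, BPF_OFFLOAD_TRANSLATE and BPF_OFFLOAD_DESTROY cases of ndo_bpf become per-device callbacks registered once at init. In outline (names are netdevsim's own, taken from these hunks):

    static const struct bpf_prog_offload_ops nsim_bpf_dev_ops = {
            .insn_hook = nsim_bpf_verify_insn,
            .finalize  = nsim_bpf_finalize,
            .prepare   = nsim_bpf_verifier_prep,   /* was BPF_OFFLOAD_VERIFIER_PREP */
            .translate = nsim_bpf_translate,       /* was BPF_OFFLOAD_TRANSLATE */
            .destroy   = nsim_bpf_destroy_prog,    /* was BPF_OFFLOAD_DESTROY */
    };

    /* registered via bpf_offload_dev_create(&nsim_bpf_dev_ops) in nsim_bpf_init() */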
nsim_bpf_verifier_prep, + .translate = nsim_bpf_translate, + .destroy = nsim_bpf_destroy_prog, +}; + static int nsim_setup_prog_checks(struct netdevsim *ns, struct netdev_bpf *bpf) { if (bpf->prog && bpf->prog->aux->offload) { @@ -533,30 +542,11 @@ static void nsim_bpf_map_free(struct bpf_offloaded_map *offmap) int nsim_bpf(struct net_device *dev, struct netdev_bpf *bpf) { struct netdevsim *ns = netdev_priv(dev); - struct nsim_bpf_bound_prog *state; int err; ASSERT_RTNL(); switch (bpf->command) { - case BPF_OFFLOAD_VERIFIER_PREP: - if (!ns->bpf_bind_accept) - return -EOPNOTSUPP; - - err = nsim_bpf_create_prog(ns, bpf->verifier.prog); - if (err) - return err; - - bpf->verifier.ops = &nsim_bpf_analyzer_ops; - return 0; - case BPF_OFFLOAD_TRANSLATE: - state = bpf->offload.prog->aux->offload->dev_priv; - - state->state = "xlated"; - return 0; - case BPF_OFFLOAD_DESTROY: - nsim_bpf_destroy_prog(bpf->offload.prog); - return 0; case XDP_QUERY_PROG: return xdp_attachment_query(&ns->xdp, bpf); case XDP_QUERY_PROG_HW: @@ -599,7 +589,7 @@ int nsim_bpf_init(struct netdevsim *ns) if (IS_ERR_OR_NULL(ns->sdev->ddir_bpf_bound_progs)) return -ENOMEM; - ns->sdev->bpf_dev = bpf_offload_dev_create(); + ns->sdev->bpf_dev = bpf_offload_dev_create(&nsim_bpf_dev_ops); err = PTR_ERR_OR_ZERO(ns->sdev->bpf_dev); if (err) return err; diff --git a/drivers/net/netdevsim/ipsec.c b/drivers/net/netdevsim/ipsec.c index 2dcf6cc269d0..76e11d889bb6 100644 --- a/drivers/net/netdevsim/ipsec.c +++ b/drivers/net/netdevsim/ipsec.c @@ -227,18 +227,19 @@ static const struct xfrmdev_ops nsim_xfrmdev_ops = { bool nsim_ipsec_tx(struct netdevsim *ns, struct sk_buff *skb) { + struct sec_path *sp = skb_sec_path(skb); struct nsim_ipsec *ipsec = &ns->ipsec; struct xfrm_state *xs; struct nsim_sa *tsa; u32 sa_idx; /* do we even need to check this packet? 
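 * (skb_sec_path() below replaces the direct skb->sp dereferences;
 * presumably it returns the packet's attached security path, or NULL
 * when there is none, regardless of how that path is stored.)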
*/ - if (!skb->sp) + if (!sp) return true; - if (unlikely(!skb->sp->len)) { + if (unlikely(!sp->len)) { netdev_err(ns->netdev, "no xfrm state len = %d\n", - skb->sp->len); + sp->len); return false; } diff --git a/drivers/net/phy/amd.c b/drivers/net/phy/amd.c index 6fe5dc9201d0..9d0504f3e3b2 100644 --- a/drivers/net/phy/amd.c +++ b/drivers/net/phy/amd.c @@ -66,7 +66,6 @@ static struct phy_driver am79c_driver[] = { { .name = "AM79C874", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = am79c_config_init, .ack_interrupt = am79c_ack_interrupt, .config_intr = am79c_config_intr, diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c index 632472cab3bb..beb3309bb0f0 100644 --- a/drivers/net/phy/aquantia.c +++ b/drivers/net/phy/aquantia.c @@ -25,15 +25,10 @@ #define PHY_ID_AQR107 0x03a1b4e0 #define PHY_ID_AQR405 0x03a1b4b0 -#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ - SUPPORTED_1000baseT_Full | \ - SUPPORTED_100baseT_Full | \ - PHY_DEFAULT_FEATURES) - static int aquantia_config_aneg(struct phy_device *phydev) { - phydev->supported = PHY_AQUANTIA_FEATURES; - phydev->advertising = phydev->supported; + linkmode_copy(phydev->supported, phy_10gbit_features); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } @@ -116,7 +111,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQ1202", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -128,7 +122,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQ2104", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -140,7 +133,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR105", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -152,7 +144,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR106", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -164,7 +155,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR107", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, @@ -176,7 +166,6 @@ static struct phy_driver aquantia_driver[] = { .phy_id_mask = 0xfffffff0, .name = "Aquantia AQR405", .features = PHY_10GBIT_FULL_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = genphy_c45_aneg_done, .config_aneg = aquantia_config_aneg, .config_intr = aquantia_config_intr, diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index e74a047a846e..f9432d053a22 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -379,7 +379,6 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -395,7 +394,6 @@ 
static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = at803x_ack_interrupt, .config_intr = at803x_config_intr, }, { @@ -410,7 +408,6 @@ static struct phy_driver at803x_driver[] = { .suspend = at803x_suspend, .resume = at803x_resume, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .aneg_done = at803x_aneg_done, .ack_interrupt = &at803x_ack_interrupt, .config_intr = &at803x_config_intr, diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c index d95bffdec4c1..a88dd14a25c0 100644 --- a/drivers/net/phy/bcm63xx.c +++ b/drivers/net/phy/bcm63xx.c @@ -43,7 +43,7 @@ static int bcm63xx_config_init(struct phy_device *phydev) int reg, err; /* ASYM_PAUSE bit is marked RO in datasheet, so don't cheat */ - phydev->supported |= SUPPORTED_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); reg = phy_read(phydev, MII_BCM63XX_IR); if (reg < 0) @@ -69,7 +69,7 @@ static struct phy_driver bcm63xx_driver[] = { .phy_id_mask = 0xfffffc00, .name = "Broadcom BCM63XX (1)", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, + .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, @@ -78,7 +78,7 @@ static struct phy_driver bcm63xx_driver[] = { .phy_id = 0x002bdc00, .phy_id_mask = 0xfffffc00, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT | PHY_IS_INTERNAL, + .flags = PHY_IS_INTERNAL, .config_init = bcm63xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm63xx_config_intr, diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c index b2b6307d64a4..712224cc442d 100644 --- a/drivers/net/phy/bcm7xxx.c +++ b/drivers/net/phy/bcm7xxx.c @@ -650,6 +650,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev) static struct phy_driver bcm7xxx_driver[] = { BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"), + BCM7XXX_28NM_EPHY(PHY_ID_BCM7255, "Broadcom BCM7255"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7260, "Broadcom BCM7260"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7268, "Broadcom BCM7268"), BCM7XXX_28NM_EPHY(PHY_ID_BCM7271, "Broadcom BCM7271"), @@ -670,6 +671,7 @@ static struct phy_driver bcm7xxx_driver[] = { static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = { { PHY_ID_BCM7250, 0xfffffff0, }, + { PHY_ID_BCM7255, 0xfffffff0, }, { PHY_ID_BCM7260, 0xfffffff0, }, { PHY_ID_BCM7268, 0xfffffff0, }, { PHY_ID_BCM7271, 0xfffffff0, }, diff --git a/drivers/net/phy/bcm87xx.c b/drivers/net/phy/bcm87xx.c index f7ebdcff53e4..1b350183bffb 100644 --- a/drivers/net/phy/bcm87xx.c +++ b/drivers/net/phy/bcm87xx.c @@ -86,8 +86,12 @@ static int bcm87xx_of_reg_init(struct phy_device *phydev) static int bcm87xx_config_init(struct phy_device *phydev) { - phydev->supported = SUPPORTED_10000baseR_FEC; - phydev->advertising = ADVERTISED_10000baseR_FEC; + linkmode_zero(phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + phydev->supported); + linkmode_zero(phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + phydev->advertising); phydev->state = PHY_NOLINK; phydev->autoneg = AUTONEG_DISABLE; @@ -193,7 +197,6 @@ static struct phy_driver bcm87xx_driver[] = { .phy_id = PHY_ID_BCM8706, .phy_id_mask = 0xffffffff, .name = "Broadcom BCM8706", - .flags = PHY_HAS_INTERRUPT, .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, @@ -205,7 +208,6 @@ 
static struct phy_driver bcm87xx_driver[] = { .phy_id = PHY_ID_BCM8727, .phy_id_mask = 0xffffffff, .name = "Broadcom BCM8727", - .flags = PHY_HAS_INTERRUPT, .config_init = bcm87xx_config_init, .config_aneg = bcm87xx_config_aneg, .read_status = bcm87xx_read_status, diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index 704537010453..aa73c5cc5f86 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -602,7 +602,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5411", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -611,7 +610,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5421", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -620,7 +618,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54210E", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -629,7 +626,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5461", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -638,7 +634,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54612E", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -647,7 +642,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54616S", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm54616s_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -657,7 +651,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5464", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -666,7 +659,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5481", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -676,7 +668,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM54810", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .ack_interrupt = bcm_phy_ack_intr, @@ -686,7 +677,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5482", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm5482_config_init, .read_status = bcm5482_read_status, .ack_interrupt = bcm_phy_ack_intr, @@ -696,7 +686,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, 
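/* As in the other PHY drivers touched by this series, the interrupt
 * capability flag is dropped from every entry; phylib presumably
 * infers interrupt support from the callbacks themselves, along the
 * lines of this sketched helper (name hypothetical):
 *
 *	static bool phy_drv_supports_irq(struct phy_driver *drv)
 *	{
 *		return drv->config_intr && drv->ack_interrupt;
 *	}
 */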
.config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -705,7 +694,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM50610M", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -714,7 +702,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM57780", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, @@ -723,7 +710,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCMAC131", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, @@ -732,7 +718,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM5241", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = brcm_fet_config_init, .ack_interrupt = brcm_fet_ack_interrupt, .config_intr = brcm_fet_config_intr, @@ -751,7 +736,6 @@ static struct phy_driver broadcom_drivers[] = { .phy_id_mask = 0xfffffff0, .name = "Broadcom BCM89610", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = bcm54xx_config_init, .ack_interrupt = bcm_phy_ack_intr, .config_intr = bcm_phy_config_intr, diff --git a/drivers/net/phy/cicada.c b/drivers/net/phy/cicada.c index c05af00bf4b6..fea61c81bda9 100644 --- a/drivers/net/phy/cicada.c +++ b/drivers/net/phy/cicada.c @@ -108,7 +108,6 @@ static struct phy_driver cis820x_driver[] = { .name = "Cicada Cis8201", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, @@ -117,7 +116,6 @@ static struct phy_driver cis820x_driver[] = { .name = "Cicada Cis8204", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &cis820x_config_init, .ack_interrupt = &cis820x_ack_interrupt, .config_intr = &cis820x_config_intr, diff --git a/drivers/net/phy/davicom.c b/drivers/net/phy/davicom.c index 5ee99b3b428c..97162008f42b 100644 --- a/drivers/net/phy/davicom.c +++ b/drivers/net/phy/davicom.c @@ -150,7 +150,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9161E", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -160,7 +159,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9161B/C", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -170,7 +168,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9161A", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dm9161_config_init, .config_aneg = dm9161_config_aneg, .ack_interrupt = dm9161_ack_interrupt, @@ -180,7 +177,6 @@ static struct phy_driver dm91xx_driver[] = { .name = "Davicom DM9131", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = 
PHY_HAS_INTERRUPT, .ack_interrupt = dm9161_ack_interrupt, .config_intr = dm9161_config_intr, } }; diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c index edd4d44a386d..18b41bc345ab 100644 --- a/drivers/net/phy/dp83640.c +++ b/drivers/net/phy/dp83640.c @@ -1521,7 +1521,6 @@ static struct phy_driver dp83640_driver = { .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83640", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = dp83640_probe, .remove = dp83640_remove, .soft_reset = dp83640_soft_reset, diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 6e8a2a4f3a6e..24c7f149f3e6 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -318,7 +318,6 @@ static struct phy_driver dp83822_driver[] = { .phy_id_mask = 0xfffffff0, .name = "TI DP83822", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dp83822_config_init, .soft_reset = dp83822_phy_reset, .get_wol = dp83822_get_wol, diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c index 6e8e42361fd5..a6b55909d1dc 100644 --- a/drivers/net/phy/dp83848.c +++ b/drivers/net/phy/dp83848.c @@ -108,7 +108,6 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl); .phy_id_mask = 0xfffffff0, \ .name = _name, \ .features = PHY_BASIC_FEATURES, \ - .flags = PHY_HAS_INTERRUPT, \ \ .soft_reset = genphy_soft_reset, \ .config_init = _config_init, \ diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index b3935778b19f..da6a67d47ce9 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -334,7 +334,6 @@ static struct phy_driver dp83867_driver[] = { .phy_id_mask = 0xfffffff0, .name = "TI DP83867", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dp83867_config_init, .soft_reset = dp83867_phy_reset, diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c index 78cad134a79e..da13356999e5 100644 --- a/drivers/net/phy/dp83tc811.c +++ b/drivers/net/phy/dp83tc811.c @@ -346,7 +346,6 @@ static struct phy_driver dp83811_driver[] = { .phy_id_mask = 0xfffffff0, .name = "TI DP83TC811", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = dp83811_config_init, .config_aneg = dp83811_config_aneg, .soft_reset = dp83811_phy_reset, diff --git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c index 67b260877f30..72d43c88e6ff 100644 --- a/drivers/net/phy/fixed_phy.c +++ b/drivers/net/phy/fixed_phy.c @@ -25,6 +25,7 @@ #include <linux/gpio.h> #include <linux/seqlock.h> #include <linux/idr.h> +#include <linux/netdevice.h> #include "swphy.h" @@ -38,6 +39,7 @@ struct fixed_phy { struct phy_device *phydev; seqcount_t seqcount; struct fixed_phy_status status; + bool no_carrier; int (*link_update)(struct net_device *, struct fixed_phy_status *); struct list_head node; int link_gpio; @@ -48,9 +50,28 @@ static struct fixed_mdio_bus platform_fmb = { .phys = LIST_HEAD_INIT(platform_fmb.phys), }; +int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier) +{ + struct fixed_mdio_bus *fmb = &platform_fmb; + struct phy_device *phydev = dev->phydev; + struct fixed_phy *fp; + + if (!phydev || !phydev->mdio.bus) + return -EINVAL; + + list_for_each_entry(fp, &fmb->phys, node) { + if (fp->addr == phydev->mdio.addr) { + fp->no_carrier = !new_carrier; + return 0; + } + } + return -EINVAL; +} +EXPORT_SYMBOL_GPL(fixed_phy_change_carrier); + static void fixed_phy_update(struct fixed_phy *fp) { - if (gpio_is_valid(fp->link_gpio)) + if (!fp->no_carrier && gpio_is_valid(fp->link_gpio)) 
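+		/* a carrier forced off via fixed_phy_change_carrier() takes
+		 * precedence over the GPIO-derived link state
+		 */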
fp->status.link = !!gpio_get_value_cansleep(fp->link_gpio); } @@ -66,6 +87,7 @@ static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) do { s = read_seqcount_begin(&fp->seqcount); + fp->status.link = !fp->no_carrier; /* Issue callback if user registered it. */ if (fp->link_update) { fp->link_update(fp->phydev->attached_dev, @@ -223,14 +245,23 @@ struct phy_device *fixed_phy_register(unsigned int irq, switch (status->speed) { case SPEED_1000: - phy->supported = PHY_1000BT_FEATURES; - break; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phy->supported); + /* fall through */ case SPEED_100: - phy->supported = PHY_100BT_FEATURES; - break; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phy->supported); + /* fall through */ case SPEED_10: default: - phy->supported = PHY_10BT_FEATURES; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + phy->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + phy->supported); } ret = phy_device_register(phy); diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c index 791587a49215..7d5938b87660 100644 --- a/drivers/net/phy/icplus.c +++ b/drivers/net/phy/icplus.c @@ -25,6 +25,7 @@ #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> +#include <linux/property.h> #include <asm/io.h> #include <asm/irq.h> @@ -36,14 +37,34 @@ MODULE_LICENSE("GPL"); /* IP101A/G - IP1001 */ #define IP10XX_SPEC_CTRL_STATUS 16 /* Spec. Control Register */ -#define IP1001_RXPHASE_SEL (1<<0) /* Add delay on RX_CLK */ -#define IP1001_TXPHASE_SEL (1<<1) /* Add delay on TX_CLK */ +#define IP1001_RXPHASE_SEL BIT(0) /* Add delay on RX_CLK */ +#define IP1001_TXPHASE_SEL BIT(1) /* Add delay on TX_CLK */ #define IP1001_SPEC_CTRL_STATUS_2 20 /* IP1001 Spec. Control Reg 2 */ #define IP1001_APS_ON 11 /* IP1001 APS Mode bit */ -#define IP101A_G_APS_ON 2 /* IP101A/G APS Mode bit */ +#define IP101A_G_APS_ON BIT(1) /* IP101A/G APS Mode bit */ #define IP101A_G_IRQ_CONF_STATUS 0x11 /* Conf Info IRQ & Status Reg */ -#define IP101A_G_IRQ_PIN_USED (1<<15) /* INTR pin used */ -#define IP101A_G_IRQ_DEFAULT IP101A_G_IRQ_PIN_USED +#define IP101A_G_IRQ_PIN_USED BIT(15) /* INTR pin used */ +#define IP101A_G_IRQ_ALL_MASK BIT(11) /* IRQ's inactive */ +#define IP101A_G_IRQ_SPEED_CHANGE BIT(2) +#define IP101A_G_IRQ_DUPLEX_CHANGE BIT(1) +#define IP101A_G_IRQ_LINK_CHANGE BIT(0) + +#define IP101G_DIGITAL_IO_SPEC_CTRL 0x1d +#define IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32 BIT(2) + +/* The 32-pin IP101GR package can re-configure the mode of the RXER/INTR_32 pin + * (pin number 21). The hardware default is RXER (receive error) mode. But it + * can be configured to interrupt mode manually. 
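+ * A board opts in through the firmware properties matched in the probe
+ * routine below; sketched as a device tree fragment (node name and
+ * address invented for illustration, property names as defined here):
+ *
+ *	ethernet-phy@1 {
+ *		reg = <1>;
+ *		icplus,select-interrupt;
+ *	};
+ *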
+ */ +enum ip101gr_sel_intr32 { + IP101GR_SEL_INTR32_KEEP, + IP101GR_SEL_INTR32_INTR, + IP101GR_SEL_INTR32_RXER, +}; + +struct ip101a_g_phy_priv { + enum ip101gr_sel_intr32 sel_intr32; +}; static int ip175c_config_init(struct phy_device *phydev) { @@ -162,18 +183,92 @@ static int ip1001_config_init(struct phy_device *phydev) return 0; } +static int ip175c_read_status(struct phy_device *phydev) +{ + if (phydev->mdio.addr == 4) /* WAN port */ + genphy_read_status(phydev); + else + /* Don't need to read status for switch ports */ + phydev->irq = PHY_IGNORE_INTERRUPT; + + return 0; +} + +static int ip175c_config_aneg(struct phy_device *phydev) +{ + if (phydev->mdio.addr == 4) /* WAN port */ + genphy_config_aneg(phydev); + + return 0; +} + +static int ip101a_g_probe(struct phy_device *phydev) +{ + struct device *dev = &phydev->mdio.dev; + struct ip101a_g_phy_priv *priv; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + /* Both functions (RX error and interrupt status) are sharing the same + * pin on the 32-pin IP101GR, so this is an exclusive choice. + */ + if (device_property_read_bool(dev, "icplus,select-rx-error") && + device_property_read_bool(dev, "icplus,select-interrupt")) { + dev_err(dev, + "RXER and INTR mode cannot be selected together\n"); + return -EINVAL; + } + + if (device_property_read_bool(dev, "icplus,select-rx-error")) + priv->sel_intr32 = IP101GR_SEL_INTR32_RXER; + else if (device_property_read_bool(dev, "icplus,select-interrupt")) + priv->sel_intr32 = IP101GR_SEL_INTR32_INTR; + else + priv->sel_intr32 = IP101GR_SEL_INTR32_KEEP; + + phydev->priv = priv; + + return 0; +} + static int ip101a_g_config_init(struct phy_device *phydev) { - int c; + struct ip101a_g_phy_priv *priv = phydev->priv; + int err, c; c = ip1xx_reset(phydev); if (c < 0) return c; - /* INTR pin used: speed/link/duplex will cause an interrupt */ - c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT); - if (c < 0) - return c; + /* configure the RXER/INTR_32 pin of the 32-pin IP101GR if needed: */ + switch (priv->sel_intr32) { + case IP101GR_SEL_INTR32_RXER: + err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL, + IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, 0); + if (err < 0) + return err; + break; + + case IP101GR_SEL_INTR32_INTR: + err = phy_modify(phydev, IP101G_DIGITAL_IO_SPEC_CTRL, + IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32, + IP101G_DIGITAL_IO_SPEC_CTRL_SEL_INTR32); + if (err < 0) + return err; + break; + + default: + /* Don't touch IP101G_DIGITAL_IO_SPEC_CTRL because it's not + * documented on IP101A and it's not clear whether this would + * cause problems. + * For the 32-pin IP101GR we simply keep the SEL_INTR32 + * configuration as set by the bootloader when not configured + * to one of the special functions. 
+ */ + break; + } /* Enable Auto Power Saving mode */ c = phy_read(phydev, IP10XX_SPEC_CTRL_STATUS); @@ -182,23 +277,29 @@ static int ip101a_g_config_init(struct phy_device *phydev) return phy_write(phydev, IP10XX_SPEC_CTRL_STATUS, c); } -static int ip175c_read_status(struct phy_device *phydev) +static int ip101a_g_config_intr(struct phy_device *phydev) { - if (phydev->mdio.addr == 4) /* WAN port */ - genphy_read_status(phydev); + u16 val; + + if (phydev->interrupts == PHY_INTERRUPT_ENABLED) + /* INTR pin used: Speed/link/duplex will cause an interrupt */ + val = IP101A_G_IRQ_PIN_USED; else - /* Don't need to read status for switch ports */ - phydev->irq = PHY_IGNORE_INTERRUPT; + val = IP101A_G_IRQ_ALL_MASK; - return 0; + return phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, val); } -static int ip175c_config_aneg(struct phy_device *phydev) +static int ip101a_g_did_interrupt(struct phy_device *phydev) { - if (phydev->mdio.addr == 4) /* WAN port */ - genphy_config_aneg(phydev); + int val = phy_read(phydev, IP101A_G_IRQ_CONF_STATUS); - return 0; + if (val < 0) + return 0; + + return val & (IP101A_G_IRQ_SPEED_CHANGE | + IP101A_G_IRQ_DUPLEX_CHANGE | + IP101A_G_IRQ_LINK_CHANGE); } static int ip101a_g_ack_interrupt(struct phy_device *phydev) @@ -234,7 +335,9 @@ static struct phy_driver icplus_driver[] = { .name = "ICPlus IP101A/G", .phy_id_mask = 0x0ffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, + .probe = ip101a_g_probe, + .config_intr = ip101a_g_config_intr, + .did_interrupt = ip101a_g_did_interrupt, .ack_interrupt = ip101a_g_ack_interrupt, .config_init = &ip101a_g_config_init, .suspend = genphy_suspend, diff --git a/drivers/net/phy/intel-xway.c b/drivers/net/phy/intel-xway.c index 7d936fb61c22..fc0f5024a29e 100644 --- a/drivers/net/phy/intel-xway.c +++ b/drivers/net/phy/intel-xway.c @@ -242,7 +242,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.3", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -255,7 +254,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.3", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -268,7 +266,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.4", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -281,7 +278,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (PEF 7061) v1.4", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .config_aneg = xway_gphy14_config_aneg, .ack_interrupt = xway_gphy_ack_interrupt, @@ -294,7 +290,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (PEF 7071/PEF 7072) v1.5 / v1.6", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -306,7 +301,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F 
(PEF 7061) v1.5 / v1.6", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -318,7 +312,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (xRX v1.1 integrated)", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -330,7 +323,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (xRX v1.1 integrated)", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -342,7 +334,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY11G (xRX v1.2 integrated)", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, @@ -354,7 +345,6 @@ static struct phy_driver xway_gphy[] = { .phy_id_mask = 0xffffffff, .name = "Intel XWAY PHY22F (xRX v1.2 integrated)", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = xway_gphy_config_init, .ack_interrupt = xway_gphy_ack_interrupt, .did_interrupt = xway_gphy_did_interrupt, diff --git a/drivers/net/phy/lxt.c b/drivers/net/phy/lxt.c index c14b254b2879..c8bb29ae1a2a 100644 --- a/drivers/net/phy/lxt.c +++ b/drivers/net/phy/lxt.c @@ -177,7 +177,7 @@ static int lxt973a2_read_status(struct phy_device *phydev) */ } while (lpa == adv && retry--); - phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(lpa); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa); lpa &= adv; @@ -218,7 +218,7 @@ static int lxt973a2_read_status(struct phy_device *phydev) phydev->speed = SPEED_10; phydev->pause = phydev->asym_pause = 0; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); } return 0; @@ -257,7 +257,6 @@ static struct phy_driver lxt97x_driver[] = { .name = "LXT970", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = lxt970_config_init, .ack_interrupt = lxt970_ack_interrupt, .config_intr = lxt970_config_intr, @@ -266,7 +265,6 @@ static struct phy_driver lxt97x_driver[] = { .name = "LXT971", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = lxt971_ack_interrupt, .config_intr = lxt971_config_intr, }, { diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index cbec296107bd..a9c7c7f41b0c 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -491,25 +491,26 @@ static int m88e1318_config_aneg(struct phy_device *phydev) } /** - * ethtool_adv_to_fiber_adv_t - * @ethadv: the ethtool advertisement settings + * linkmode_adv_to_fiber_adv_t + * @advertise: the linkmode advertisement settings * - * A small helper function that translates ethtool advertisement - * settings to phy autonegotiation advertisements for the - * MII_ADV register for fiber link. + * A small helper function that translates linkmode advertisement + * settings to phy autonegotiation advertisements for the MII_ADV + * register for fiber link. 
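+ *
+ * Example use, as in the fiber autoneg path below (the caller masks
+ * the advertisement against the supported modes first):
+ *
+ *	adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising);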
*/ -static inline u32 ethtool_adv_to_fiber_adv_t(u32 ethadv) +static inline u32 linkmode_adv_to_fiber_adv_t(unsigned long *advertise) { u32 result = 0; - if (ethadv & ADVERTISED_1000baseT_Half) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, advertise)) result |= ADVERTISE_FIBER_1000HALF; - if (ethadv & ADVERTISED_1000baseT_Full) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, advertise)) result |= ADVERTISE_FIBER_1000FULL; - if ((ethadv & ADVERTISE_PAUSE_ASYM) && (ethadv & ADVERTISE_PAUSE_CAP)) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, advertise) && + linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise)) result |= LPA_PAUSE_ASYM_FIBER; - else if (ethadv & ADVERTISE_PAUSE_CAP) + else if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, advertise)) result |= (ADVERTISE_PAUSE_FIBER & (~ADVERTISE_PAUSE_ASYM_FIBER)); @@ -530,14 +531,13 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) int changed = 0; int err; int adv, oldadv; - u32 advertise; if (phydev->autoneg != AUTONEG_ENABLE) return genphy_setup_forced(phydev); /* Only allow advertising what this PHY supports */ - phydev->advertising &= phydev->supported; - advertise = phydev->advertising; + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); /* Setup fiber advertisement */ adv = phy_read(phydev, MII_ADVERTISE); @@ -547,7 +547,7 @@ static int marvell_config_aneg_fiber(struct phy_device *phydev) oldadv = adv; adv &= ~(ADVERTISE_FIBER_1000HALF | ADVERTISE_FIBER_1000FULL | LPA_PAUSE_FIBER); - adv |= ethtool_adv_to_fiber_adv_t(advertise); + adv |= linkmode_adv_to_fiber_adv_t(phydev->advertising); if (adv != oldadv) { err = phy_write(phydev, MII_ADVERTISE, adv); @@ -847,7 +847,6 @@ static int m88e1510_config_init(struct phy_device *phydev) /* SGMII-to-Copper mode initialization */ if (phydev->interface == PHY_INTERFACE_MODE_SGMII) { - u32 pause; /* Select page 18 */ err = marvell_set_page(phydev, 18); @@ -878,9 +877,14 @@ static int m88e1510_config_init(struct phy_device *phydev) * This means we can never be truly sure what was advertised, * so disable Pause support. */ - pause = SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->supported &= ~pause; - phydev->advertising &= ~pause; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); } return m88e1318_config_init(phydev); @@ -1043,22 +1047,21 @@ static int m88e1145_config_init(struct phy_device *phydev) } /** - * fiber_lpa_to_ethtool_lpa_t + * fiber_lpa_mod_linkmode_lpa_t + * @advertising: the linkmode advertisement settings * @lpa: value of the MII_LPA register for fiber link * - * A small helper function that translates MII_LPA - * bits to ethtool LP advertisement settings. + * A small helper function that translates MII_LPA bits to linkmode LP + * advertisement settings. Other bits in advertising are left + * unchanged.
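+ *
+ * A sketched use: start from a zeroed link-mode mask, then merge in
+ * what the fiber link partner reported (lpa here being the raw
+ * MII_LPA read):
+ *
+ *	__ETHTOOL_DECLARE_LINK_MODE_MASK(lp) = { 0, };
+ *
+ *	fiber_lpa_mod_linkmode_lpa_t(lp, lpa);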
*/ -static u32 fiber_lpa_to_ethtool_lpa_t(u32 lpa) +static void fiber_lpa_mod_linkmode_lpa_t(unsigned long *advertising, u32 lpa) { - u32 result = 0; - - if (lpa & LPA_FIBER_1000HALF) - result |= ADVERTISED_1000baseT_Half; - if (lpa & LPA_FIBER_1000FULL) - result |= ADVERTISED_1000baseT_Full; + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + advertising, lpa & LPA_FIBER_1000HALF); - return result; + linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising, lpa & LPA_FIBER_1000FULL); } /** @@ -1134,9 +1137,8 @@ static int marvell_read_status_page_an(struct phy_device *phydev, } if (!fiber) { - phydev->lp_advertising = - mii_stat1000_to_ethtool_lpa_t(lpagb) | - mii_lpa_to_ethtool_lpa_t(lpa); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, lpa); + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, lpagb); if (phydev->duplex == DUPLEX_FULL) { phydev->pause = lpa & LPA_PAUSE_CAP ? 1 : 0; @@ -1144,7 +1146,7 @@ static int marvell_read_status_page_an(struct phy_device *phydev, } } else { /* The fiber link is only 1000M capable */ - phydev->lp_advertising = fiber_lpa_to_ethtool_lpa_t(lpa); + fiber_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); if (phydev->duplex == DUPLEX_FULL) { if (!(lpa & LPA_PAUSE_FIBER)) { @@ -1183,7 +1185,7 @@ static int marvell_read_status_page_fixed(struct phy_device *phydev) phydev->pause = 0; phydev->asym_pause = 0; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); return 0; } @@ -1235,7 +1237,8 @@ static int marvell_read_status(struct phy_device *phydev) int err; /* Check the fiber mode first */ - if (phydev->supported & SUPPORTED_FIBRE && + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported) && phydev->interface != PHY_INTERFACE_MODE_SGMII) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) @@ -1278,7 +1281,8 @@ static int marvell_suspend(struct phy_device *phydev) int err; /* Suspend the fiber mode first */ - if (!(phydev->supported & SUPPORTED_FIBRE)) { + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1312,7 +1316,8 @@ static int marvell_resume(struct phy_device *phydev) int err; /* Resume the fiber mode first */ - if (!(phydev->supported & SUPPORTED_FIBRE)) { + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) { err = marvell_set_page(phydev, MII_MARVELL_FIBER_PAGE); if (err < 0) goto error; @@ -1463,7 +1468,8 @@ error: static int marvell_get_sset_count(struct phy_device *phydev) { - if (phydev->supported & SUPPORTED_FIBRE) + if (linkmode_test_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported)) return ARRAY_SIZE(marvell_hw_stats); else return ARRAY_SIZE(marvell_hw_stats) - NB_FIBER_STATS; @@ -2005,7 +2011,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1101", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1101_config_aneg, @@ -2024,7 +2029,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1112", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2043,7 +2047,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1111", .features = PHY_GBIT_FEATURES, - .flags = 
PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2063,7 +2066,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1118", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1118_config_init, .config_aneg = &m88e1118_config_aneg, @@ -2082,7 +2084,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1121R", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = &m88e1121_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1121_config_aneg, @@ -2103,7 +2104,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1318S", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1318_config_init, .config_aneg = &m88e1318_config_aneg, @@ -2126,7 +2126,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1145", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1145_config_init, .config_aneg = &m88e1101_config_aneg, @@ -2146,7 +2145,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1149R", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1149_config_init, .config_aneg = &m88e1118_config_aneg, @@ -2165,7 +2163,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1240", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1111_config_init, .config_aneg = &marvell_config_aneg, @@ -2184,7 +2181,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1116R", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e1116r_config_init, .ack_interrupt = &marvell_ack_interrupt, @@ -2202,7 +2198,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1510", .features = PHY_GBIT_FIBRE_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = &m88e1510_probe, .config_init = &m88e1510_config_init, .config_aneg = &m88e1510_config_aneg, @@ -2226,7 +2221,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E1540", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = m88e1510_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, @@ -2248,7 +2242,6 @@ static struct phy_driver marvell_drivers[] = { .name = "Marvell 88E1545", .probe = m88e1510_probe, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, .read_status = &marvell_read_status, @@ -2268,7 +2261,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E3016", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = marvell_probe, .config_init = &m88e3016_config_init, .aneg_done = &marvell_aneg_done, @@ -2289,7 +2281,6 @@ static struct phy_driver marvell_drivers[] = { .phy_id_mask = MARVELL_PHY_ID_MASK, .name = "Marvell 88E6390", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe 
= m88e6390_probe, .config_init = &marvell_config_init, .config_aneg = &m88e1510_config_aneg, diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index 1c9d039eec63..82ab6ed3b74e 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -252,7 +252,6 @@ static int mv3310_resume(struct phy_device *phydev) static int mv3310_config_init(struct phy_device *phydev) { __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; - u32 mask; int val; /* Check that the PHY interface type is compatible */ @@ -336,13 +335,9 @@ static int mv3310_config_init(struct phy_device *phydev) } } - if (!ethtool_convert_link_mode_to_legacy_u32(&mask, supported)) - phydev_warn(phydev, - "PHY supports (%*pb) more modes than phylib supports, some modes not supported.\n", - __ETHTOOL_LINK_MODE_MASK_NBITS, supported); - - phydev->supported &= mask; - phydev->advertising &= phydev->supported; + linkmode_copy(phydev->supported, supported); + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); return 0; } @@ -350,7 +345,7 @@ static int mv3310_config_init(struct phy_device *phydev) static int mv3310_config_aneg(struct phy_device *phydev) { bool changed = false; - u32 advertising; + u16 reg; int ret; /* We don't support manual MDI control */ @@ -364,31 +359,35 @@ static int mv3310_config_aneg(struct phy_device *phydev) return genphy_c45_an_disable_aneg(phydev); } - phydev->advertising &= phydev->supported; - advertising = phydev->advertising; + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_ADVERTISE, ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM, - ethtool_adv_to_mii_adv_t(advertising)); + linkmode_adv_to_mii_adv_t(phydev->advertising)); if (ret < 0) return ret; if (ret > 0) changed = true; + reg = linkmode_adv_to_mii_ctrl1000_t(phydev->advertising); ret = mv3310_modify(phydev, MDIO_MMD_AN, MV_AN_CTRL1000, - ADVERTISE_1000FULL | ADVERTISE_1000HALF, - ethtool_adv_to_mii_ctrl1000_t(advertising)); + ADVERTISE_1000FULL | ADVERTISE_1000HALF, reg); if (ret < 0) return ret; if (ret > 0) changed = true; /* 10G control register */ + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->advertising)) + reg = MDIO_AN_10GBT_CTRL_ADV10G; + else + reg = 0; + ret = mv3310_modify(phydev, MDIO_MMD_AN, MDIO_AN_10GBT_CTRL, - MDIO_AN_10GBT_CTRL_ADV10G, - advertising & ADVERTISED_10000baseT_Full ? 
- MDIO_AN_10GBT_CTRL_ADV10G : 0); + MDIO_AN_10GBT_CTRL_ADV10G, reg); if (ret < 0) return ret; if (ret > 0) @@ -458,7 +457,7 @@ static int mv3310_read_status(struct phy_device *phydev) phydev->speed = SPEED_UNKNOWN; phydev->duplex = DUPLEX_UNKNOWN; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); phydev->link = 0; phydev->pause = 0; phydev->asym_pause = 0; @@ -491,7 +490,7 @@ static int mv3310_read_status(struct phy_device *phydev) if (val < 0) return val; - phydev->lp_advertising |= mii_stat1000_to_ethtool_lpa_t(val); + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, val); if (phydev->autoneg == AUTONEG_ENABLE) phy_resolve_aneg_linkmode(phydev); diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c index 0fbcedcdf6e2..ea9a0e339778 100644 --- a/drivers/net/phy/mdio-gpio.c +++ b/drivers/net/phy/mdio-gpio.c @@ -24,6 +24,7 @@ #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/platform_device.h> +#include <linux/platform_data/mdio-gpio.h> #include <linux/mdio-bitbang.h> #include <linux/mdio-gpio.h> #include <linux/gpio/consumer.h> @@ -112,6 +113,7 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, struct mdio_gpio_info *bitbang, int bus_id) { + struct mdio_gpio_platform_data *pdata = dev_get_platdata(dev); struct mii_bus *new_bus; bitbang->ctrl.ops = &mdio_gpio_ops; @@ -128,6 +130,11 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev, else strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE); + if (pdata) { + new_bus->phy_mask = pdata->phy_mask; + new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask; + } + dev_set_drvdata(dev, new_bus); return new_bus; diff --git a/drivers/net/phy/meson-gxl.c b/drivers/net/phy/meson-gxl.c index ddc2c5ea3787..b03bcf2c388a 100644 --- a/drivers/net/phy/meson-gxl.c +++ b/drivers/net/phy/meson-gxl.c @@ -232,7 +232,7 @@ static struct phy_driver meson_gxl_phy[] = { .phy_id_mask = 0xfffffff0, .name = "Meson GXL Internal PHY", .features = PHY_BASIC_FEATURES, - .flags = PHY_IS_INTERNAL | PHY_HAS_INTERRUPT, + .flags = PHY_IS_INTERNAL, .config_init = meson_gxl_config_init, .aneg_done = genphy_aneg_done, .read_status = meson_gxl_read_status, diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 9265dea79412..c33384710d26 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -311,17 +311,22 @@ static int kszphy_config_init(struct phy_device *phydev) static int ksz8041_config_init(struct phy_device *phydev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(mask) = { 0, }; + struct device_node *of_node = phydev->mdio.dev.of_node; /* Limit supported and advertised modes in fiber mode */ if (of_property_read_bool(of_node, "micrel,fiber-mode")) { phydev->dev_flags |= MICREL_PHY_FXEN; - phydev->supported &= SUPPORTED_100baseT_Full | - SUPPORTED_100baseT_Half; - phydev->supported |= SUPPORTED_FIBRE; - phydev->advertising &= ADVERTISED_100baseT_Full | - ADVERTISED_100baseT_Half; - phydev->advertising |= ADVERTISED_FIBRE; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, mask); + + linkmode_and(phydev->supported, phydev->supported, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->supported); + linkmode_and(phydev->advertising, phydev->advertising, mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT, + phydev->advertising); phydev->autoneg = AUTONEG_DISABLE; } @@ -918,7 +923,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KS8737", .features 
= PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ks8737_type, .config_init = kszphy_config_init, .ack_interrupt = kszphy_ack_interrupt, @@ -930,7 +934,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00ffffff, .name = "Micrel KSZ8021 or KSZ8031", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -946,7 +949,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x00ffffff, .name = "Micrel KSZ8031", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8021_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -962,7 +964,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = ksz8041_config_init, @@ -979,7 +980,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8041RNLI", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -995,7 +995,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8051", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8051_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1011,7 +1010,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8001 or KS8721", .phy_id_mask = 0x00fffffc, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8041_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1027,7 +1025,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8081 or KSZ8091", .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz8081_type, .probe = kszphy_probe, .config_init = kszphy_config_init, @@ -1043,7 +1040,6 @@ static struct phy_driver ksphy_driver[] = { .name = "Micrel KSZ8061", .phy_id_mask = MICREL_PHY_ID_MASK, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .ack_interrupt = kszphy_ack_interrupt, .config_intr = kszphy_config_intr, @@ -1054,7 +1050,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = 0x000ffffe, .name = "Micrel KSZ9021 Gigabit PHY", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9021_config_init, @@ -1072,7 +1067,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ9031 Gigabit PHY", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9031_config_init, @@ -1089,7 +1083,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Microchip KSZ9131 Gigabit PHY", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .driver_data = &ksz9021_type, .probe = kszphy_probe, .config_init = ksz9131_config_init, @@ -1115,7 +1108,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ886X Switch", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .suspend = genphy_suspend, .resume = genphy_resume, @@ -1124,7 
+1116,6 @@ static struct phy_driver ksphy_driver[] = { .phy_id_mask = MICREL_PHY_ID_MASK, .name = "Micrel KSZ8795", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = kszphy_config_init, .config_aneg = ksz8873mll_config_aneg, .read_status = ksz8873mll_read_status, diff --git a/drivers/net/phy/microchip.c b/drivers/net/phy/microchip.c index 04b12e34da58..7557bebd5d7f 100644 --- a/drivers/net/phy/microchip.c +++ b/drivers/net/phy/microchip.c @@ -346,7 +346,6 @@ static struct phy_driver microchip_phy_driver[] = { .name = "Microchip LAN88xx", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = lan88xx_probe, .remove = lan88xx_remove, diff --git a/drivers/net/phy/microchip_t1.c b/drivers/net/phy/microchip_t1.c index c600a8509d60..3d09b471632c 100644 --- a/drivers/net/phy/microchip_t1.c +++ b/drivers/net/phy/microchip_t1.c @@ -47,7 +47,6 @@ static struct phy_driver microchip_t1_phy_driver[] = { .name = "Microchip LAN87xx T1", .features = PHY_BASIC_T1_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = genphy_config_init, .config_aneg = genphy_config_aneg, diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c index 7cae17517744..3949fe299b18 100644 --- a/drivers/net/phy/mscc.c +++ b/drivers/net/phy/mscc.c @@ -853,6 +853,51 @@ static void vsc85xx_tr_write(struct phy_device *phydev, u16 addr, u32 val) __phy_write(phydev, MSCC_PHY_TR_CNTL, TR_WRITE | TR_ADDR(addr)); } +static int vsc8531_pre_init_seq_set(struct phy_device *phydev) +{ + int rc; + const struct reg_val init_seq[] = { + {0x0f90, 0x00688980}, + {0x0696, 0x00000003}, + {0x07fa, 0x0050100f}, + {0x1686, 0x00000004}, + }; + unsigned int i; + int oldpage; + + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_STANDARD, + MSCC_PHY_EXT_CNTL_STATUS, SMI_BROADCAST_WR_EN, + SMI_BROADCAST_WR_EN); + if (rc < 0) + return rc; + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST, + MSCC_PHY_TEST_PAGE_24, 0, 0x0400); + if (rc < 0) + return rc; + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST, + MSCC_PHY_TEST_PAGE_5, 0x0a00, 0x0e00); + if (rc < 0) + return rc; + rc = phy_modify_paged(phydev, MSCC_PHY_PAGE_TEST, + MSCC_PHY_TEST_PAGE_8, 0x8000, 0x8000); + if (rc < 0) + return rc; + + mutex_lock(&phydev->lock); + oldpage = phy_select_page(phydev, MSCC_PHY_PAGE_TR); + if (oldpage < 0) + goto out_unlock; + + for (i = 0; i < ARRAY_SIZE(init_seq); i++) + vsc85xx_tr_write(phydev, init_seq[i].reg, init_seq[i].val); + +out_unlock: + oldpage = phy_restore_page(phydev, oldpage, oldpage); + mutex_unlock(&phydev->lock); + + return oldpage; +} + static int vsc85xx_eee_init_seq_set(struct phy_device *phydev) { const struct reg_val init_eee[] = { @@ -1650,7 +1695,7 @@ err: static int vsc85xx_config_init(struct phy_device *phydev) { - int rc, i; + int rc, i, phy_id; struct vsc8531_private *vsc8531 = phydev->priv; rc = vsc85xx_default_config(phydev); @@ -1665,6 +1710,14 @@ static int vsc85xx_config_init(struct phy_device *phydev) if (rc) return rc; + phy_id = phydev->drv->phy_id & phydev->drv->phy_id_mask; + if (PHY_ID_VSC8531 == phy_id || PHY_ID_VSC8541 == phy_id || + PHY_ID_VSC8530 == phy_id || PHY_ID_VSC8540 == phy_id) { + rc = vsc8531_pre_init_seq_set(phydev); + if (rc) + return rc; + } + rc = vsc85xx_eee_init_seq_set(phydev); if (rc) return rc; @@ -1829,7 +1882,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi FE VSC8530", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = 
&vsc85xx_config_aneg, @@ -1855,7 +1907,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi VSC8531", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1881,7 +1932,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi FE VSC8540 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1907,7 +1957,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi VSC8541 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc85xx_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1933,7 +1982,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi GE VSC8574 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, @@ -1960,7 +2008,6 @@ static struct phy_driver vsc85xx_driver[] = { .name = "Microsemi GE VSC8584 SyncE", .phy_id_mask = 0xfffffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .soft_reset = &genphy_soft_reset, .config_init = &vsc8584_config_init, .config_aneg = &vsc85xx_config_aneg, diff --git a/drivers/net/phy/national.c b/drivers/net/phy/national.c index 2b1e336961f9..139bed2c8ab4 100644 --- a/drivers/net/phy/national.c +++ b/drivers/net/phy/national.c @@ -134,7 +134,6 @@ static struct phy_driver dp83865_driver[] = { { .phy_id_mask = 0xfffffff0, .name = "NatSemi DP83865", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = ns_config_init, .ack_interrupt = ns_ack_interrupt, .config_intr = ns_config_intr, diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c index d7636ff03bc7..03af927fa5ad 100644 --- a/drivers/net/phy/phy-c45.c +++ b/drivers/net/phy/phy-c45.c @@ -181,7 +181,7 @@ int genphy_c45_read_lpa(struct phy_device *phydev) if (val < 0) return val; - phydev->lp_advertising = mii_lpa_to_ethtool_lpa_t(val); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, val); phydev->pause = val & LPA_PAUSE_CAP ? 1 : 0; phydev->asym_pause = val & LPA_PAUSE_ASYM ? 1 : 0; @@ -191,7 +191,8 @@ int genphy_c45_read_lpa(struct phy_device *phydev) return val; if (val & MDIO_AN_10GBT_STAT_LP10G) - phydev->lp_advertising |= ADVERTISED_10000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->lp_advertising); return 0; } @@ -304,8 +305,11 @@ EXPORT_SYMBOL_GPL(gen10g_no_soft_reset); int gen10g_config_init(struct phy_device *phydev) { /* Temporarily just say we support everything */ - phydev->supported = SUPPORTED_10000baseT_Full; - phydev->advertising = SUPPORTED_10000baseT_Full; + linkmode_zero(phydev->supported); + + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); return 0; } diff --git a/drivers/net/phy/phy-core.c b/drivers/net/phy/phy-core.c index c7da4cbb1103..20fbd5eb56fd 100644 --- a/drivers/net/phy/phy-core.c +++ b/drivers/net/phy/phy-core.c @@ -62,6 +62,124 @@ EXPORT_SYMBOL_GPL(phy_duplex_to_str); * must be grouped by speed and sorted in descending match priority * - iow, descending speed. 
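 *
 * phy_lookup_setting() walks this table in order, so match priority
 * follows descending speed; a sketched call against the new signature
 * (the mask is now implicitly bounded by __ETHTOOL_LINK_MODE_MASK_NBITS
 * rather than a caller-supplied maxbit):
 *
 *	const struct phy_setting *s;
 *
 *	s = phy_lookup_setting(SPEED_1000, DUPLEX_FULL,
 *			       phydev->supported, false);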
*/ static const struct phy_setting settings[] = { + /* 100G */ + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT, + }, + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT, + }, + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT, + }, + { + .speed = SPEED_100000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT, + }, + /* 56G */ + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT, + }, + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT, + }, + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT, + }, + { + .speed = SPEED_56000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT, + }, + /* 50G */ + { + .speed = SPEED_50000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT, + }, + { + .speed = SPEED_50000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT, + }, + { + .speed = SPEED_50000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT, + }, + /* 40G */ + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT, + }, + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT, + }, + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT, + }, + { + .speed = SPEED_40000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT, + }, + /* 25G */ + { + .speed = SPEED_25000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT, + }, + { + .speed = SPEED_25000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT, + }, + { + .speed = SPEED_25000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT, + }, + + /* 20G */ + { + .speed = SPEED_20000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT, + }, + { + .speed = SPEED_20000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_20000baseMLD2_Full_BIT, + }, + /* 10G */ + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseER_Full_BIT, + }, { .speed = SPEED_10000, .duplex = DUPLEX_FULL, @@ -75,22 +193,51 @@ static const struct phy_setting settings[] = { { .speed = SPEED_10000, .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseR_FEC_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT, + }, + { + .speed = SPEED_10000, + .duplex = DUPLEX_FULL, .bit = ETHTOOL_LINK_MODE_10000baseT_Full_BIT, }, + /* 5G */ + { + .speed = SPEED_5000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + }, + + /* 2.5G */ { .speed = SPEED_2500, .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, + .bit = ETHTOOL_LINK_MODE_2500baseT_Full_BIT, }, { - .speed = SPEED_1000, + .speed = SPEED_2500, .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + 
.bit = ETHTOOL_LINK_MODE_2500baseX_Full_BIT, }, + /* 1G */ { .speed = SPEED_1000, .duplex = DUPLEX_FULL, - .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + .bit = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, }, { .speed = SPEED_1000, @@ -103,6 +250,12 @@ static const struct phy_setting settings[] = { .bit = ETHTOOL_LINK_MODE_1000baseT_Half_BIT, }, { + .speed = SPEED_1000, + .duplex = DUPLEX_FULL, + .bit = ETHTOOL_LINK_MODE_1000baseX_Full_BIT, + }, + /* 100M */ + { .speed = SPEED_100, .duplex = DUPLEX_FULL, .bit = ETHTOOL_LINK_MODE_100baseT_Full_BIT, @@ -112,6 +265,7 @@ static const struct phy_setting settings[] = { .duplex = DUPLEX_HALF, .bit = ETHTOOL_LINK_MODE_100baseT_Half_BIT, }, + /* 10M */ { .speed = SPEED_10, .duplex = DUPLEX_FULL, @@ -129,7 +283,6 @@ static const struct phy_setting settings[] = { * @speed: speed to match * @duplex: duplex to match * @mask: allowed link modes - * @maxbit: bit size of link modes * @exact: an exact match is required * * Search the settings array for a setting that matches the speed and @@ -143,14 +296,14 @@ static const struct phy_setting settings[] = { * they all fail, %NULL will be returned. */ const struct phy_setting * -phy_lookup_setting(int speed, int duplex, const unsigned long *mask, - size_t maxbit, bool exact) +phy_lookup_setting(int speed, int duplex, const unsigned long *mask, bool exact) { const struct phy_setting *p, *match = NULL, *last = NULL; int i; for (i = 0, p = settings; i < ARRAY_SIZE(settings); i++, p++) { - if (p->bit < maxbit && test_bit(p->bit, mask)) { + if (p->bit < __ETHTOOL_LINK_MODE_MASK_NBITS && + test_bit(p->bit, mask)) { last = p; if (p->speed == speed && p->duplex == duplex) { /* Exact match for speed and duplex */ @@ -175,13 +328,13 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, EXPORT_SYMBOL_GPL(phy_lookup_setting); size_t phy_speeds(unsigned int *speeds, size_t size, - unsigned long *mask, size_t maxbit) + unsigned long *mask) { size_t count; int i; for (i = 0, count = 0; i < ARRAY_SIZE(settings) && count < size; i++) - if (settings[i].bit < maxbit && + if (settings[i].bit < __ETHTOOL_LINK_MODE_MASK_NBITS && test_bit(settings[i].bit, mask) && (count == 0 || speeds[count - 1] != settings[i].speed)) speeds[count++] = settings[i].speed; @@ -199,35 +352,53 @@ size_t phy_speeds(unsigned int *speeds, size_t size, */ void phy_resolve_aneg_linkmode(struct phy_device *phydev) { - u32 common = phydev->lp_advertising & phydev->advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(common); - if (common & ADVERTISED_10000baseT_Full) { + linkmode_and(common, phydev->lp_advertising, phydev->advertising); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, common)) { phydev->speed = SPEED_10000; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_1000baseT_Full) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, + common)) { + phydev->speed = SPEED_5000; + phydev->duplex = DUPLEX_FULL; + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, + common)) { + phydev->speed = SPEED_2500; + phydev->duplex = DUPLEX_FULL; + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + common)) { phydev->speed = SPEED_1000; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_1000baseT_Half) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + common)) { phydev->speed = SPEED_1000; phydev->duplex = DUPLEX_HALF; - } else if (common & ADVERTISED_100baseT_Full) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + 
common)) { phydev->speed = SPEED_100; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_100baseT_Half) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + common)) { phydev->speed = SPEED_100; phydev->duplex = DUPLEX_HALF; - } else if (common & ADVERTISED_10baseT_Full) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + common)) { phydev->speed = SPEED_10; phydev->duplex = DUPLEX_FULL; - } else if (common & ADVERTISED_10baseT_Half) { + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + common)) { phydev->speed = SPEED_10; phydev->duplex = DUPLEX_HALF; } if (phydev->duplex == DUPLEX_FULL) { - phydev->pause = !!(phydev->lp_advertising & ADVERTISED_Pause); - phydev->asym_pause = !!(phydev->lp_advertising & - ADVERTISED_Asym_Pause); + phydev->pause = linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->lp_advertising); + phydev->asym_pause = linkmode_test_bit( + ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->lp_advertising); } } EXPORT_SYMBOL_GPL(phy_resolve_aneg_linkmode); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 1d73ac3309ce..d33e7b3caf03 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -46,11 +46,8 @@ static const char *phy_state_to_str(enum phy_state st) { switch (st) { PHY_STATE_STR(DOWN) - PHY_STATE_STR(STARTING) PHY_STATE_STR(READY) - PHY_STATE_STR(PENDING) PHY_STATE_STR(UP) - PHY_STATE_STR(AN) PHY_STATE_STR(RUNNING) PHY_STATE_STR(NOLINK) PHY_STATE_STR(FORCING) @@ -62,6 +59,17 @@ static const char *phy_state_to_str(enum phy_state st) return NULL; } +static void phy_link_up(struct phy_device *phydev) +{ + phydev->phy_link_change(phydev, true, true); + phy_led_trigger_change_speed(phydev); +} + +static void phy_link_down(struct phy_device *phydev, bool do_carrier) +{ + phydev->phy_link_change(phydev, false, do_carrier); + phy_led_trigger_change_speed(phydev); +} /** * phy_print_status - Convenience function to print out the current phy status @@ -105,9 +113,9 @@ static int phy_clear_interrupt(struct phy_device *phydev) * * Returns 0 on success or < 0 on error. */ -static int phy_config_interrupt(struct phy_device *phydev, u32 interrupts) +static int phy_config_interrupt(struct phy_device *phydev, bool interrupts) { - phydev->interrupts = interrupts; + phydev->interrupts = interrupts ? 1 : 0; if (phydev->drv->config_intr) return phydev->drv->config_intr(phydev); @@ -171,11 +179,9 @@ EXPORT_SYMBOL(phy_aneg_done); * settings were found. */ static const struct phy_setting * -phy_find_valid(int speed, int duplex, u32 supported) +phy_find_valid(int speed, int duplex, unsigned long *supported) { - unsigned long mask = supported; - - return phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, false); + return phy_lookup_setting(speed, duplex, supported, false); } /** @@ -192,9 +198,7 @@ unsigned int phy_supported_speeds(struct phy_device *phy, unsigned int *speeds, unsigned int size) { - unsigned long supported = phy->supported; - - return phy_speeds(speeds, size, &supported, BITS_PER_LONG); + return phy_speeds(speeds, size, phy->supported); } /** @@ -206,11 +210,10 @@ unsigned int phy_supported_speeds(struct phy_device *phy, * * Description: Returns true if there is a valid setting, false otherwise. 
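[Editor's sketch] The settings[] table above only works because it stays sorted by descending speed: phy_lookup_setting() can then do a single linear scan, returning the first exact hit, otherwise the fastest mode not above the request, otherwise the slowest enabled mode. A userspace sketch of that contract with a toy four-entry table (speeds, duplex flags and bit numbers are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct setting { int speed; int duplex; int bit; };

/* must stay sorted by descending speed, exactly as the comment demands */
static const struct setting settings[] = {
    { 1000, 1, 0 }, { 100, 1, 1 }, { 100, 0, 2 }, { 10, 1, 3 },
};

static bool mask_test(int bit, unsigned long mask)
{
    return mask & (1UL << bit);
}

static const struct setting *
lookup(int speed, int duplex, unsigned long mask, bool exact)
{
    const struct setting *match = NULL, *last = NULL;
    size_t i;

    for (i = 0; i < sizeof(settings) / sizeof(settings[0]); i++) {
        const struct setting *p = &settings[i];

        if (!mask_test(p->bit, mask))
            continue;
        last = p;
        if (p->speed == speed && p->duplex == duplex)
            return p;          /* exact hit */
        if (!exact) {
            if (!match && p->speed <= speed)
                match = p;     /* fastest mode not above the request */
            if (p->speed < speed)
                break;         /* descending order: nothing better follows */
        }
    }
    if (!match && !exact)
        match = last;          /* fall back to slowest enabled mode */
    return match;
}

int main(void)
{
    /* mask enables only 100/full (bit 1) and 10/full (bit 3) */
    const struct setting *s = lookup(1000, 1, (1UL << 1) | (1UL << 3), false);

    printf("fell back to %d Mbps\n", s ? s->speed : -1);
    return 0;
}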
*/ -static inline bool phy_check_valid(int speed, int duplex, u32 features) +static inline bool phy_check_valid(int speed, int duplex, + unsigned long *features) { - unsigned long mask = features; - - return !!phy_lookup_setting(speed, duplex, &mask, BITS_PER_LONG, true); + return !!phy_lookup_setting(speed, duplex, features, true); } /** @@ -224,13 +227,13 @@ static inline bool phy_check_valid(int speed, int duplex, u32 features) static void phy_sanitize_settings(struct phy_device *phydev) { const struct phy_setting *setting; - u32 features = phydev->supported; /* Sanitize settings based on PHY capabilities */ - if ((features & SUPPORTED_Autoneg) == 0) + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, phydev->supported)) phydev->autoneg = AUTONEG_DISABLE; - setting = phy_find_valid(phydev->speed, phydev->duplex, features); + setting = phy_find_valid(phydev->speed, phydev->duplex, + phydev->supported); if (setting) { phydev->speed = setting->speed; phydev->duplex = setting->duplex; @@ -256,13 +259,15 @@ static void phy_sanitize_settings(struct phy_device *phydev) */ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); u32 speed = ethtool_cmd_speed(cmd); if (cmd->phy_address != phydev->mdio.addr) return -EINVAL; /* We make sure that we don't pass unsupported values in to the PHY */ - cmd->advertising &= phydev->supported; + ethtool_convert_legacy_u32_to_link_mode(advertising, cmd->advertising); + linkmode_and(advertising, advertising, phydev->supported); /* Verify the settings we care about. */ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) @@ -283,12 +288,14 @@ int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd) phydev->speed = speed; - phydev->advertising = cmd->advertising; + linkmode_copy(phydev->advertising, advertising); if (AUTONEG_ENABLE == cmd->autoneg) - phydev->advertising |= ADVERTISED_Autoneg; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); else - phydev->advertising &= ~ADVERTISED_Autoneg; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); phydev->duplex = cmd->duplex; @@ -304,25 +311,24 @@ EXPORT_SYMBOL(phy_ethtool_sset); int phy_ethtool_ksettings_set(struct phy_device *phydev, const struct ethtool_link_ksettings *cmd) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); u8 autoneg = cmd->base.autoneg; u8 duplex = cmd->base.duplex; u32 speed = cmd->base.speed; - u32 advertising; if (cmd->base.phy_address != phydev->mdio.addr) return -EINVAL; - ethtool_convert_link_mode_to_legacy_u32(&advertising, - cmd->link_modes.advertising); + linkmode_copy(advertising, cmd->link_modes.advertising); /* We make sure that we don't pass unsupported values in to the PHY */ - advertising &= phydev->supported; + linkmode_and(advertising, advertising, phydev->supported); /* Verify the settings we care about.
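[Editor's sketch] The recurring shape of this conversion: a stack-declared link-mode bitmap, ANDed against phydev->supported, then copied back, because the set of link modes outgrew 32 bits. A minimal sketch of why the u32 masks had to go, using plain unsigned long arrays the way the kernel's linkmode_* helpers wrap bitmap_*(); the NBITS value and bit numbers are illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define NBITS     52U  /* illustrative; the real __ETHTOOL_LINK_MODE_MASK_NBITS > 32 */
#define LONG_BITS (8U * sizeof(unsigned long))
#define LONGS     ((NBITS + LONG_BITS - 1) / LONG_BITS)

static void mode_set(unsigned int bit, unsigned long *m)
{
    m[bit / LONG_BITS] |= 1UL << (bit % LONG_BITS);
}

static void mode_and(unsigned long *dst, const unsigned long *a,
                     const unsigned long *b)
{
    size_t i;

    for (i = 0; i < LONGS; i++)
        dst[i] = a[i] & b[i];
}

static bool mode_empty(const unsigned long *m)
{
    size_t i;

    for (i = 0; i < LONGS; i++)
        if (m[i])
            return false;
    return true;
}

int main(void)
{
    unsigned long supported[LONGS], requested[LONGS], adv[LONGS];

    memset(supported, 0, sizeof(supported));
    memset(requested, 0, sizeof(requested));

    mode_set(5, supported);   /* bit numbers are illustrative */
    mode_set(47, requested);  /* a bit above 31: unreachable with the old u32 masks */

    mode_and(adv, requested, supported);
    printf("common modes empty: %s\n", mode_empty(adv) ? "yes" : "no");
    return 0;
}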
*/ if (autoneg != AUTONEG_ENABLE && autoneg != AUTONEG_DISABLE) return -EINVAL; - if (autoneg == AUTONEG_ENABLE && advertising == 0) + if (autoneg == AUTONEG_ENABLE && linkmode_empty(advertising)) return -EINVAL; if (autoneg == AUTONEG_DISABLE && @@ -337,12 +343,14 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev, phydev->speed = speed; - phydev->advertising = advertising; + linkmode_copy(phydev->advertising, advertising); if (autoneg == AUTONEG_ENABLE) - phydev->advertising |= ADVERTISED_Autoneg; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); else - phydev->advertising &= ~ADVERTISED_Autoneg; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, + phydev->advertising); phydev->duplex = duplex; @@ -358,14 +366,9 @@ EXPORT_SYMBOL(phy_ethtool_ksettings_set); void phy_ethtool_ksettings_get(struct phy_device *phydev, struct ethtool_link_ksettings *cmd) { - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, - phydev->supported); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, - phydev->advertising); - - ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising, - phydev->lp_advertising); + linkmode_copy(cmd->link_modes.supported, phydev->supported); + linkmode_copy(cmd->link_modes.advertising, phydev->advertising); + linkmode_copy(cmd->link_modes.lp_advertising, phydev->lp_advertising); cmd->base.speed = phydev->speed; cmd->base.duplex = phydev->duplex; @@ -434,7 +437,8 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } break; case MII_ADVERTISE: - phydev->advertising = mii_adv_to_ethtool_adv_t(val); + mii_adv_mod_linkmode_adv_t(phydev->advertising, + val); change_autoneg = true; break; default: @@ -467,6 +471,18 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd) } EXPORT_SYMBOL(phy_mii_ioctl); +static void phy_queue_state_machine(struct phy_device *phydev, + unsigned int secs) +{ + mod_delayed_work(system_power_efficient_wq, &phydev->state_queue, + secs * HZ); +} + +static void phy_trigger_machine(struct phy_device *phydev) +{ + phy_queue_state_machine(phydev, 0); +} + static int phy_config_aneg(struct phy_device *phydev) { if (phydev->drv->config_aneg) @@ -482,6 +498,34 @@ static int phy_config_aneg(struct phy_device *phydev) } /** + * phy_check_link_status - check link status and set state accordingly + * @phydev: the phy_device struct + * + * Description: Check for link and whether autoneg was triggered / is running + * and set state accordingly + */ +static int phy_check_link_status(struct phy_device *phydev) +{ + int err; + + WARN_ON(!mutex_is_locked(&phydev->lock)); + + err = phy_read_status(phydev); + if (err) + return err; + + if (phydev->link && phydev->state != PHY_RUNNING) { + phydev->state = PHY_RUNNING; + phy_link_up(phydev); + } else if (!phydev->link && phydev->state != PHY_NOLINK) { + phydev->state = PHY_NOLINK; + phy_link_down(phydev, true); + } + + return 0; +} + +/** * phy_start_aneg - start auto-negotiation for this PHY device * @phydev: the phy_device struct * @@ -492,7 +536,6 @@ static int phy_config_aneg(struct phy_device *phydev) */ int phy_start_aneg(struct phy_device *phydev) { - bool trigger = 0; int err; if (!phydev->drv) @@ -500,44 +543,33 @@ int phy_start_aneg(struct phy_device *phydev) mutex_lock(&phydev->lock); + if (!__phy_is_started(phydev)) { + WARN(1, "called from state %s\n", + phy_state_to_str(phydev->state)); + err = -EBUSY; + goto out_unlock; + } + if (AUTONEG_DISABLE == phydev->autoneg) 
phy_sanitize_settings(phydev); /* Invalidate LP advertising flags */ - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); err = phy_config_aneg(phydev); if (err < 0) goto out_unlock; - if (phydev->state != PHY_HALTED) { - if (AUTONEG_ENABLE == phydev->autoneg) { - phydev->state = PHY_AN; - phydev->link_timeout = PHY_AN_TIMEOUT; - } else { - phydev->state = PHY_FORCING; - phydev->link_timeout = PHY_FORCE_TIMEOUT; - } - } - - /* Re-schedule a PHY state machine to check PHY status because - * negotiation may already be done and aneg interrupt may not be - * generated. - */ - if (!phy_polling_mode(phydev) && phydev->state == PHY_AN) { - err = phy_aneg_done(phydev); - if (err > 0) { - trigger = true; - err = 0; - } + if (phydev->autoneg == AUTONEG_ENABLE) { + err = phy_check_link_status(phydev); + } else { + phydev->state = PHY_FORCING; + phydev->link_timeout = PHY_FORCE_TIMEOUT; } out_unlock: mutex_unlock(&phydev->lock); - if (trigger) - phy_trigger_machine(phydev); - return err; } EXPORT_SYMBOL(phy_start_aneg); @@ -573,20 +605,38 @@ static int phy_poll_aneg_done(struct phy_device *phydev) */ int phy_speed_down(struct phy_device *phydev, bool sync) { - u32 adv = phydev->lp_advertising & phydev->supported; - u32 adv_old = phydev->advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); int ret; if (phydev->autoneg != AUTONEG_ENABLE) return 0; - if (adv & PHY_10BT_FEATURES) - phydev->advertising &= ~(PHY_100BT_FEATURES | - PHY_1000BT_FEATURES); - else if (adv & PHY_100BT_FEATURES) - phydev->advertising &= ~PHY_1000BT_FEATURES; + linkmode_copy(adv_old, phydev->advertising); + linkmode_copy(adv, phydev->lp_advertising); + linkmode_and(adv, adv, phydev->supported); + + if (linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, adv) || + linkmode_test_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, adv)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising); + } else if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + adv) || + linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + adv)) { + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->advertising); + } - if (phydev->advertising == adv_old) + if (linkmode_equal(phydev->advertising, adv_old)) return 0; ret = phy_config_aneg(phydev); @@ -605,28 +655,36 @@ EXPORT_SYMBOL_GPL(phy_speed_down); */ int phy_speed_up(struct phy_device *phydev) { - u32 mask = PHY_10BT_FEATURES | PHY_100BT_FEATURES | PHY_1000BT_FEATURES; - u32 adv_old = phydev->advertising; + __ETHTOOL_DECLARE_LINK_MODE_MASK(all_speeds) = { 0, }; + __ETHTOOL_DECLARE_LINK_MODE_MASK(not_speeds); + __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_old); + __ETHTOOL_DECLARE_LINK_MODE_MASK(speeds); + + linkmode_copy(adv_old, phydev->advertising); if (phydev->autoneg != AUTONEG_ENABLE) return 0; - phydev->advertising = (adv_old & ~mask) | (phydev->supported & mask); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, 
all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, all_speeds); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, all_speeds); + + linkmode_andnot(not_speeds, adv_old, all_speeds); + linkmode_copy(supported, phydev->supported); + linkmode_and(speeds, supported, all_speeds); + linkmode_or(phydev->advertising, not_speeds, speeds); - if (phydev->advertising == adv_old) + if (linkmode_equal(phydev->advertising, adv_old)) return 0; return phy_config_aneg(phydev); } EXPORT_SYMBOL_GPL(phy_speed_up); -static void phy_queue_state_machine(struct phy_device *phydev, - unsigned int secs) -{ - mod_delayed_work(system_power_efficient_wq, &phydev->state_queue, - secs * HZ); -} - /** * phy_start_machine - start PHY state machine tracking * @phydev: the phy_device struct @@ -644,20 +702,6 @@ void phy_start_machine(struct phy_device *phydev) EXPORT_SYMBOL_GPL(phy_start_machine); /** - * phy_trigger_machine - trigger the state machine to run - * - * @phydev: the phy_device struct - * - * Description: There has been a change in state which requires that the - * state machine runs. - */ - -void phy_trigger_machine(struct phy_device *phydev) -{ - phy_queue_state_machine(phydev, 0); -} - -/** * phy_stop_machine - stop the PHY state machine tracking * @phydev: target phy_device struct * @@ -670,7 +714,7 @@ void phy_stop_machine(struct phy_device *phydev) cancel_delayed_work_sync(&phydev->state_queue); mutex_lock(&phydev->lock); - if (phydev->state > PHY_UP && phydev->state != PHY_HALTED) + if (__phy_is_started(phydev)) phydev->state = PHY_UP; mutex_unlock(&phydev->lock); } @@ -686,6 +730,8 @@ void phy_stop_machine(struct phy_device *phydev) */ static void phy_error(struct phy_device *phydev) { + WARN_ON(1); + mutex_lock(&phydev->lock); phydev->state = PHY_HALTED; mutex_unlock(&phydev->lock); @@ -711,30 +757,26 @@ static int phy_disable_interrupts(struct phy_device *phydev) } /** - * phy_change - Called by the phy_interrupt to handle PHY changes - * @phydev: phy_device struct that interrupted + * phy_interrupt - PHY interrupt handler + * @irq: interrupt line + * @phy_dat: phy_device pointer + * + * Description: Handle PHY interrupt */ -static irqreturn_t phy_change(struct phy_device *phydev) +static irqreturn_t phy_interrupt(int irq, void *phy_dat) { - if (phy_interrupt_is_valid(phydev)) { - if (phydev->drv->did_interrupt && - !phydev->drv->did_interrupt(phydev)) - return IRQ_NONE; - - if (phydev->state == PHY_HALTED) - if (phy_disable_interrupts(phydev)) - goto phy_err; - } + struct phy_device *phydev = phy_dat; - mutex_lock(&phydev->lock); - if ((PHY_RUNNING == phydev->state) || (PHY_NOLINK == phydev->state)) - phydev->state = PHY_CHANGELINK; - mutex_unlock(&phydev->lock); + if (!phy_is_started(phydev)) + return IRQ_NONE; /* It can't be ours. 
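[Editor's sketch] phy_speed_up() above is pure mask algebra: keep every non-speed bit of the old advertisement, then re-enable all supported speed bits that phy_speed_down() may have cleared. The same computation reduced to one word for clarity (the real code does it across linkmode bitmaps, and the mask values here are illustrative):

#include <assert.h>

#define SPEED_BITS 0x3FUL  /* illustrative: 10/100/1000, half+full */

static unsigned long speed_up(unsigned long adv_old, unsigned long supported)
{
    unsigned long not_speeds = adv_old & ~SPEED_BITS;  /* pause, aneg, ... */
    unsigned long speeds = supported & SPEED_BITS;     /* every supported speed */

    return not_speeds | speeds;
}

int main(void)
{
    /* non-speed bits (0x1C0) survive; the cleared speed bits come back */
    unsigned long adv_old = 0x1C0 | 0x03;  /* only 10M left advertised */
    unsigned long supported = SPEED_BITS;

    assert(speed_up(adv_old, supported) == (0x1C0 | SPEED_BITS));
    return 0;
}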
*/ + + if (phydev->drv->did_interrupt && !phydev->drv->did_interrupt(phydev)) + return IRQ_NONE; /* reschedule state queue work to run as soon as possible */ phy_trigger_machine(phydev); - if (phy_interrupt_is_valid(phydev) && phy_clear_interrupt(phydev)) + if (phy_clear_interrupt(phydev)) goto phy_err; return IRQ_HANDLED; @@ -744,36 +786,6 @@ phy_err: } /** - * phy_change_work - Scheduled by the phy_mac_interrupt to handle PHY changes - * @work: work_struct that describes the work to be done - */ -void phy_change_work(struct work_struct *work) -{ - struct phy_device *phydev = - container_of(work, struct phy_device, phy_queue); - - phy_change(phydev); -} - -/** - * phy_interrupt - PHY interrupt handler - * @irq: interrupt line - * @phy_dat: phy_device pointer - * - * Description: When a PHY interrupt occurs, the handler disables - * interrupts, and uses phy_change to handle the interrupt. - */ -static irqreturn_t phy_interrupt(int irq, void *phy_dat) -{ - struct phy_device *phydev = phy_dat; - - if (PHY_HALTED == phydev->state) - return IRQ_NONE; /* It can't be ours. */ - - return phy_change(phydev); -} - -/** * phy_enable_interrupts - Enable the interrupts from the PHY side * @phydev: target phy_device struct */ @@ -837,21 +849,24 @@ void phy_stop(struct phy_device *phydev) { mutex_lock(&phydev->lock); - if (PHY_HALTED == phydev->state) - goto out_unlock; + if (!__phy_is_started(phydev)) { + WARN(1, "called from state %s\n", + phy_state_to_str(phydev->state)); + mutex_unlock(&phydev->lock); + return; + } if (phy_interrupt_is_valid(phydev)) phy_disable_interrupts(phydev); phydev->state = PHY_HALTED; -out_unlock: mutex_unlock(&phydev->lock); phy_state_machine(&phydev->state_queue.work); /* Cannot call flush_scheduled_work() here as desired because - * of rtnl_lock(), but PHY_HALTED shall guarantee phy_change() + * of rtnl_lock(), but PHY_HALTED shall guarantee irq handler * will not reenable interrupts. */ } @@ -874,9 +889,6 @@ void phy_start(struct phy_device *phydev) mutex_lock(&phydev->lock); switch (phydev->state) { - case PHY_STARTING: - phydev->state = PHY_PENDING; - break; case PHY_READY: phydev->state = PHY_UP; break; @@ -902,18 +914,6 @@ void phy_start(struct phy_device *phydev) } EXPORT_SYMBOL(phy_start); -static void phy_link_up(struct phy_device *phydev) -{ - phydev->phy_link_change(phydev, true, true); - phy_led_trigger_change_speed(phydev); -} - -static void phy_link_down(struct phy_device *phydev, bool do_carrier) -{ - phydev->phy_link_change(phydev, false, do_carrier); - phy_led_trigger_change_speed(phydev); -} - /** * phy_state_machine - Handle the state machine * @work: work_struct that describes the work to be done @@ -936,63 +936,17 @@ void phy_state_machine(struct work_struct *work) switch (phydev->state) { case PHY_DOWN: - case PHY_STARTING: case PHY_READY: - case PHY_PENDING: break; case PHY_UP: needs_aneg = true; - phydev->link_timeout = PHY_AN_TIMEOUT; - - break; - case PHY_AN: - err = phy_read_status(phydev); - if (err < 0) - break; - - /* If the link is down, give up on negotiation for now */ - if (!phydev->link) { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); - break; - } - - /* Check if negotiation is done. 
Break if there's an error */ - err = phy_aneg_done(phydev); - if (err < 0) - break; - - /* If AN is done, we're running */ - if (err > 0) { - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } else if (0 == phydev->link_timeout--) - needs_aneg = true; break; case PHY_NOLINK: - if (!phy_polling_mode(phydev)) - break; - - err = phy_read_status(phydev); - if (err) - break; - - if (phydev->link) { - if (AUTONEG_ENABLE == phydev->autoneg) { - err = phy_aneg_done(phydev); - if (err < 0) - break; - - if (!err) { - phydev->state = PHY_AN; - phydev->link_timeout = PHY_AN_TIMEOUT; - break; - } - } - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } + case PHY_RUNNING: + case PHY_CHANGELINK: + case PHY_RESUMING: + err = phy_check_link_status(phydev); break; case PHY_FORCING: err = genphy_update_link(phydev); @@ -1008,32 +962,6 @@ void phy_state_machine(struct work_struct *work) phy_link_down(phydev, false); } break; - case PHY_RUNNING: - if (!phy_polling_mode(phydev)) - break; - - err = phy_read_status(phydev); - if (err) - break; - - if (!phydev->link) { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); - } - break; - case PHY_CHANGELINK: - err = phy_read_status(phydev); - if (err) - break; - - if (phydev->link) { - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } else { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, true); - } - break; case PHY_HALTED: if (phydev->link) { phydev->link = 0; @@ -1041,30 +969,6 @@ void phy_state_machine(struct work_struct *work) do_suspend = true; } break; - case PHY_RESUMING: - if (AUTONEG_ENABLE == phydev->autoneg) { - err = phy_aneg_done(phydev); - if (err < 0) { - break; - } else if (!err) { - phydev->state = PHY_AN; - phydev->link_timeout = PHY_AN_TIMEOUT; - break; - } - } - - err = phy_read_status(phydev); - if (err) - break; - - if (phydev->link) { - phydev->state = PHY_RUNNING; - phy_link_up(phydev); - } else { - phydev->state = PHY_NOLINK; - phy_link_down(phydev, false); - } - break; } mutex_unlock(&phydev->lock); @@ -1090,7 +994,7 @@ void phy_state_machine(struct work_struct *work) * state machine would be pointless and possibly error prone when * called from phy_disconnect() synchronously. 
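[Editor's sketch] The net effect of the state-machine hunks above: the PHY_AN, PHY_CHANGELINK and PHY_RESUMING special cases all funnel through one phy_check_link_status() step with exactly two outcomes, RUNNING or NOLINK. A hypothetical userspace model of the reduced flow, not the kernel structs:

#include <stdbool.h>
#include <stdio.h>

enum phy_state { PHY_DOWN, PHY_READY, PHY_UP, PHY_RUNNING, PHY_NOLINK, PHY_HALTED };

struct phy { enum phy_state state; bool link; };

static void check_link_status(struct phy *p)
{
    /* mirrors phy_check_link_status(): only two outcomes */
    if (p->link && p->state != PHY_RUNNING) {
        p->state = PHY_RUNNING;
        puts("link up");
    } else if (!p->link && p->state != PHY_NOLINK) {
        p->state = PHY_NOLINK;
        puts("link down");
    }
}

static void state_machine(struct phy *p)
{
    switch (p->state) {
    case PHY_DOWN:
    case PHY_READY:
    case PHY_HALTED:
        break;              /* nothing to do */
    case PHY_UP:            /* needs_aneg in the real code, then polling */
    case PHY_RUNNING:
    case PHY_NOLINK:
        check_link_status(p);
        break;
    }
}

int main(void)
{
    struct phy p = { .state = PHY_UP, .link = false };

    state_machine(&p);  /* -> NOLINK */
    p.link = true;
    state_machine(&p);  /* -> RUNNING */
    return 0;
}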
*/ - if (phy_polling_mode(phydev) && old_state != PHY_HALTED) + if (phy_polling_mode(phydev) && phy_is_started(phydev)) phy_queue_state_machine(phydev, PHY_STATE_TIME); } @@ -1104,10 +1008,34 @@ void phy_state_machine(struct work_struct *work) void phy_mac_interrupt(struct phy_device *phydev) { /* Trigger a state machine change */ - queue_work(system_power_efficient_wq, &phydev->phy_queue); + phy_trigger_machine(phydev); } EXPORT_SYMBOL(phy_mac_interrupt); +static void mmd_eee_adv_to_linkmode(unsigned long *advertising, u16 eee_adv) +{ + linkmode_zero(advertising); + + if (eee_adv & MDIO_EEE_100TX) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_1000T) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_10GT) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_1000KX) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseKX_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_10GKX4) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT, + advertising); + if (eee_adv & MDIO_EEE_10GKR) + linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseKR_Full_BIT, + advertising); +} + /** * phy_init_eee - init and check the EEE feature * @phydev: target phy_device struct @@ -1126,9 +1054,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) /* According to 802.3az,the EEE is supported only in full duplex-mode. */ if (phydev->duplex == DUPLEX_FULL) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(common); + __ETHTOOL_DECLARE_LINK_MODE_MASK(lp); + __ETHTOOL_DECLARE_LINK_MODE_MASK(adv); int eee_lp, eee_cap, eee_adv; - u32 lp, cap, adv; int status; + u32 cap; /* Read phy status to properly get the right settings */ status = phy_read_status(phydev); @@ -1155,9 +1086,11 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) if (eee_adv <= 0) goto eee_exit_err; - adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv); - lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp); - if (!phy_check_valid(phydev->speed, phydev->duplex, lp & adv)) + mmd_eee_adv_to_linkmode(adv, eee_adv); + mmd_eee_adv_to_linkmode(lp, eee_lp); + linkmode_and(common, adv, lp); + + if (!phy_check_valid(phydev->speed, phydev->duplex, common)) goto eee_exit_err; if (clk_stop_enable) { @@ -1221,6 +1154,7 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) if (val < 0) return val; data->advertised = mmd_eee_adv_to_ethtool_adv_t(val); + data->eee_enabled = !!data->advertised; /* Get LP advertisement EEE */ val = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_LPABLE); @@ -1228,6 +1162,8 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data) return val; data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val); + data->eee_active = !!(data->advertised & data->lp_advertised); + return 0; } EXPORT_SYMBOL(phy_ethtool_get_eee); @@ -1241,7 +1177,7 @@ EXPORT_SYMBOL(phy_ethtool_get_eee); */ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) { - int cap, old_adv, adv, ret; + int cap, old_adv, adv = 0, ret; if (!phydev->drv) return -EIO; @@ -1255,10 +1191,12 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data) if (old_adv < 0) return old_adv; - adv = ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap; - - /* Mask prohibited EEE modes */ - adv &= ~phydev->eee_broken_modes; + if (data->eee_enabled) { + adv = !data->advertised ? 
cap : + ethtool_adv_to_mmd_eee_adv_t(data->advertised) & cap; + /* Mask prohibited EEE modes */ + adv &= ~phydev->eee_broken_modes; + } if (old_adv != adv) { ret = phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_EEE_ADV, adv); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 26c41ede54a4..51990002d495 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -66,10 +66,12 @@ static const int phy_basic_ports_array[] = { ETHTOOL_LINK_MODE_TP_BIT, ETHTOOL_LINK_MODE_MII_BIT, }; +EXPORT_SYMBOL_GPL(phy_basic_ports_array); static const int phy_fibre_port_array[] = { ETHTOOL_LINK_MODE_FIBRE_BIT, }; +EXPORT_SYMBOL_GPL(phy_fibre_port_array); static const int phy_all_ports_features_array[] = { ETHTOOL_LINK_MODE_Autoneg_BIT, @@ -80,27 +82,32 @@ static const int phy_all_ports_features_array[] = { ETHTOOL_LINK_MODE_BNC_BIT, ETHTOOL_LINK_MODE_Backplane_BIT, }; +EXPORT_SYMBOL_GPL(phy_all_ports_features_array); -static const int phy_10_100_features_array[] = { +const int phy_10_100_features_array[4] = { ETHTOOL_LINK_MODE_10baseT_Half_BIT, ETHTOOL_LINK_MODE_10baseT_Full_BIT, ETHTOOL_LINK_MODE_100baseT_Half_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_10_100_features_array); -static const int phy_basic_t1_features_array[] = { +const int phy_basic_t1_features_array[2] = { ETHTOOL_LINK_MODE_TP_BIT, ETHTOOL_LINK_MODE_100baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_basic_t1_features_array); -static const int phy_gbit_features_array[] = { +const int phy_gbit_features_array[2] = { ETHTOOL_LINK_MODE_1000baseT_Half_BIT, ETHTOOL_LINK_MODE_1000baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_gbit_features_array); -static const int phy_10gbit_features_array[] = { +const int phy_10gbit_features_array[1] = { ETHTOOL_LINK_MODE_10000baseT_Full_BIT, }; +EXPORT_SYMBOL_GPL(phy_10gbit_features_array); __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; EXPORT_SYMBOL_GPL(phy_10gbit_full_features); @@ -584,7 +591,6 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, mutex_init(&dev->lock); INIT_DELAYED_WORK(&dev->state_queue, phy_state_machine); - INIT_WORK(&dev->phy_queue, phy_change_work); /* Request the appropriate module unconditionally; don't * bother trying to do so only if it isn't already loaded, @@ -596,7 +602,21 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, * driver will get bored and give up as soon as it finds that * there's no driver _already_ loaded. 
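[Editor's sketch] The new clause-45 probe path below walks a "devices in package" mask, one bit per MMD (bit 0 skipped), requesting a driver module per present device instead of one module for the whole package. A sketch with made-up MMD IDs; the real MDIO_ID_FMT expands to a bit-string match pattern, simplified here to a hex print:

#include <stdio.h>

int main(void)
{
    unsigned int device_ids[32] = { 0 };
    unsigned int devices_in_package = (1u << 1) | (1u << 3); /* PMA/PMD + PCS */
    int i;

    device_ids[1] = 0x002b09aa;  /* made-up clause-45 MMD IDs */
    device_ids[3] = 0x002b09ab;

    for (i = 1; i < 32; i++) {
        if (!(devices_in_package & (1u << i)))
            continue;
        /* the kernel calls request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT,
         * MDIO_ID_ARGS(...)) per present MMD at this point */
        printf("request_module(\"mdio:%08x\")\n", device_ids[i]);
    }
    return 0;
}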
*/ - request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, MDIO_ID_ARGS(phy_id)); + if (is_c45 && c45_ids) { + const int num_ids = ARRAY_SIZE(c45_ids->device_ids); + int i; + + for (i = 1; i < num_ids; i++) { + if (!(c45_ids->devices_in_package & (1 << i))) + continue; + + request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, + MDIO_ID_ARGS(c45_ids->device_ids[i])); + } + } else { + request_module(MDIO_MODULE_PREFIX MDIO_ID_FMT, + MDIO_ID_ARGS(phy_id)); + } device_initialize(&mdiodev->dev); @@ -1439,8 +1459,13 @@ static int genphy_config_advert(struct phy_device *phydev) int err, changed = 0; /* Only allow advertising what this PHY supports */ - phydev->advertising &= phydev->supported; - advertise = phydev->advertising; + linkmode_and(phydev->advertising, phydev->advertising, + phydev->supported); + if (!ethtool_convert_link_mode_to_legacy_u32(&advertise, + phydev->advertising)) + phydev_warn(phydev, "PHY advertising (%*pb) more modes than genphy supports, some modes not advertised.\n", + __ETHTOOL_LINK_MODE_MASK_NBITS, + phydev->advertising); /* Setup standard advertisement */ adv = phy_read(phydev, MII_ADVERTISE); @@ -1479,10 +1504,11 @@ static int genphy_config_advert(struct phy_device *phydev) oldadv = adv; adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); - if (phydev->supported & (SUPPORTED_1000baseT_Half | - SUPPORTED_1000baseT_Full)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) adv |= ethtool_adv_to_mii_ctrl1000_t(advertise); - } if (adv != oldadv) changed = 1; @@ -1687,11 +1713,13 @@ int genphy_read_status(struct phy_device *phydev) if (err) return err; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); if (AUTONEG_ENABLE == phydev->autoneg) { - if (phydev->supported & (SUPPORTED_1000baseT_Half - | SUPPORTED_1000baseT_Full)) { + if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported) || + linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported)) { lpagb = phy_read(phydev, MII_STAT1000); if (lpagb < 0) return lpagb; @@ -1708,8 +1736,8 @@ int genphy_read_status(struct phy_device *phydev) return -ENOLINK; } - phydev->lp_advertising = - mii_stat1000_to_ethtool_lpa_t(lpagb); + mii_stat1000_mod_linkmode_lpa_t(phydev->lp_advertising, + lpagb); common_adv_gb = lpagb & adv << 2; } @@ -1717,7 +1745,7 @@ int genphy_read_status(struct phy_device *phydev) if (lpa < 0) return lpa; - phydev->lp_advertising |= mii_lpa_to_ethtool_lpa_t(lpa); + mii_lpa_mod_linkmode_lpa_t(phydev->lp_advertising, lpa); adv = phy_read(phydev, MII_ADVERTISE); if (adv < 0) @@ -1798,11 +1826,13 @@ EXPORT_SYMBOL(genphy_soft_reset); int genphy_config_init(struct phy_device *phydev) { int val; - u32 features; + __ETHTOOL_DECLARE_LINK_MODE_MASK(features) = { 0, }; - features = (SUPPORTED_TP | SUPPORTED_MII - | SUPPORTED_AUI | SUPPORTED_FIBRE | - SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause); + linkmode_set_bit_array(phy_basic_ports_array, + ARRAY_SIZE(phy_basic_ports_array), + features); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, features); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, features); /* Do we support autonegotiation? 
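[Editor's sketch] In the genphy_config_init() hunks around here, the feature bitmap is now built bit by bit from BMSR capability flags rather than by ORing SUPPORTED_* constants into a u32. The mapping itself, sketched over a plain word; the MII register bit values are the standard ones, the FEAT_* flags are illustrative:

#include <stdio.h>

#define BMSR_ANEGCAPABLE 0x0008
#define BMSR_10HALF      0x0800
#define BMSR_10FULL      0x1000
#define BMSR_100HALF     0x2000
#define BMSR_100FULL     0x4000

enum { FEAT_AUTONEG = 1, FEAT_10H = 2, FEAT_10F = 4, FEAT_100H = 8, FEAT_100F = 16 };

static unsigned int bmsr_to_features(unsigned int bmsr)
{
    unsigned int f = 0;

    if (bmsr & BMSR_ANEGCAPABLE)
        f |= FEAT_AUTONEG;
    if (bmsr & BMSR_100FULL)
        f |= FEAT_100F;
    if (bmsr & BMSR_100HALF)
        f |= FEAT_100H;
    if (bmsr & BMSR_10FULL)
        f |= FEAT_10F;
    if (bmsr & BMSR_10HALF)
        f |= FEAT_10H;
    return f;
}

int main(void)
{
    /* a typical 10/100 PHY that is also autoneg-capable */
    printf("features = 0x%x\n", bmsr_to_features(0x7808));
    return 0;
}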
*/ val = phy_read(phydev, MII_BMSR); @@ -1810,16 +1840,16 @@ int genphy_config_init(struct phy_device *phydev) return val; if (val & BMSR_ANEGCAPABLE) - features |= SUPPORTED_Autoneg; + linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, features); if (val & BMSR_100FULL) - features |= SUPPORTED_100baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, features); if (val & BMSR_100HALF) - features |= SUPPORTED_100baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, features); if (val & BMSR_10FULL) - features |= SUPPORTED_10baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, features); if (val & BMSR_10HALF) - features |= SUPPORTED_10baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, features); if (val & BMSR_ESTATEN) { val = phy_read(phydev, MII_ESTATUS); @@ -1827,13 +1857,15 @@ int genphy_config_init(struct phy_device *phydev) return val; if (val & ESTATUS_1000_TFULL) - features |= SUPPORTED_1000baseT_Full; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + features); if (val & ESTATUS_1000_THALF) - features |= SUPPORTED_1000baseT_Half; + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + features); } - phydev->supported &= features; - phydev->advertising &= features; + linkmode_and(phydev->supported, phydev->supported, features); + linkmode_and(phydev->advertising, phydev->advertising, features); return 0; } @@ -1879,10 +1911,16 @@ static int __set_phy_supported(struct phy_device *phydev, u32 max_speed) { switch (max_speed) { case SPEED_10: - phydev->supported &= ~PHY_100BT_FEATURES; + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + phydev->supported); /* fall through */ case SPEED_100: - phydev->supported &= ~PHY_1000BT_FEATURES; + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + phydev->supported); break; case SPEED_1000: break; @@ -1901,7 +1939,7 @@ int phy_set_max_speed(struct phy_device *phydev, u32 max_speed) if (err) return err; - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); return 0; } @@ -1918,10 +1956,8 @@ EXPORT_SYMBOL(phy_set_max_speed); */ void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode) { - WARN_ON(link_mode > 31); - - phydev->supported &= ~BIT(link_mode); - phydev->advertising = phydev->supported; + linkmode_clear_bit(link_mode, phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_remove_link_mode); @@ -1934,9 +1970,9 @@ EXPORT_SYMBOL(phy_remove_link_mode); */ void phy_support_sym_pause(struct phy_device *phydev) { - phydev->supported &= ~SUPPORTED_Asym_Pause; - phydev->supported |= SUPPORTED_Pause; - phydev->advertising = phydev->supported; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_support_sym_pause); @@ -1948,8 +1984,9 @@ EXPORT_SYMBOL(phy_support_sym_pause); */ void phy_support_asym_pause(struct phy_device *phydev) { - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - phydev->advertising = phydev->supported; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydev->supported); + linkmode_copy(phydev->advertising, phydev->supported); } 
EXPORT_SYMBOL(phy_support_asym_pause); @@ -1967,12 +2004,13 @@ EXPORT_SYMBOL(phy_support_asym_pause); void phy_set_sym_pause(struct phy_device *phydev, bool rx, bool tx, bool autoneg) { - phydev->supported &= ~SUPPORTED_Pause; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydev->supported); if (rx && tx && autoneg) - phydev->supported |= SUPPORTED_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); } EXPORT_SYMBOL(phy_set_sym_pause); @@ -1989,20 +2027,29 @@ EXPORT_SYMBOL(phy_set_sym_pause); */ void phy_set_asym_pause(struct phy_device *phydev, bool rx, bool tx) { - u16 oldadv = phydev->advertising; - u16 newadv = oldadv &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); + __ETHTOOL_DECLARE_LINK_MODE_MASK(oldadv); - if (rx) - newadv |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; - if (tx) - newadv ^= SUPPORTED_Asym_Pause; + linkmode_copy(oldadv, phydev->advertising); - if (oldadv != newadv) { - phydev->advertising = newadv; + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); - if (phydev->autoneg) - phy_start_aneg(phydev); + if (rx) { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); } + + if (tx) + linkmode_change_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); + + if (!linkmode_equal(oldadv, phydev->advertising) && + phydev->autoneg) + phy_start_aneg(phydev); } EXPORT_SYMBOL(phy_set_asym_pause); @@ -2018,8 +2065,10 @@ EXPORT_SYMBOL(phy_set_asym_pause); bool phy_validate_pause(struct phy_device *phydev, struct ethtool_pauseparam *pp) { - if (!(phydev->supported & SUPPORTED_Pause) || - (!(phydev->supported & SUPPORTED_Asym_Pause) && + if (!linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported) || + (!linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported) && pp->rx_pause != pp->tx_pause)) return false; return true; @@ -2068,6 +2117,11 @@ static void of_set_phy_eee_broken(struct phy_device *phydev) phydev->eee_broken_modes = broken; } +static bool phy_drv_supports_irq(struct phy_driver *phydrv) +{ + return phydrv->config_intr && phydrv->ack_interrupt; +} + /** * phy_probe - probe and init a PHY device * @dev: device to probe and init @@ -2081,7 +2135,6 @@ static int phy_probe(struct device *dev) struct phy_device *phydev = to_phy_device(dev); struct device_driver *drv = phydev->mdio.dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); - u32 features; int err = 0; phydev->drv = phydrv; @@ -2089,8 +2142,7 @@ static int phy_probe(struct device *dev) /* Disable the interrupt if the PHY doesn't support it * but the interrupt is still a valid one */ - if (!(phydrv->flags & PHY_HAS_INTERRUPT) && - phy_interrupt_is_valid(phydev)) + if (!phy_drv_supports_irq(phydrv) && phy_interrupt_is_valid(phydev)) phydev->irq = PHY_POLL; if (phydrv->flags & PHY_IS_INTERNAL) @@ -2102,10 +2154,9 @@ static int phy_probe(struct device *dev) * a controller will attach, and may modify one * or both of these values */ - ethtool_convert_link_mode_to_legacy_u32(&features, phydrv->features); - phydev->supported = features; + linkmode_copy(phydev->supported, phydrv->features); of_set_phy_supported(phydev); - phydev->advertising = phydev->supported; + linkmode_copy(phydev->advertising, phydev->supported); /* Get the EEE modes we want to prohibit. 
We will ask * the PHY stop advertising these mode later on @@ -2125,14 +2176,22 @@ static int phy_probe(struct device *dev) */ if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features) || test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) { - phydev->supported &= ~(SUPPORTED_Pause | SUPPORTED_Asym_Pause); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); if (test_bit(ETHTOOL_LINK_MODE_Pause_BIT, phydrv->features)) - phydev->supported |= SUPPORTED_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); if (test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, phydrv->features)) - phydev->supported |= SUPPORTED_Asym_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); } else { - phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->supported); } /* Set the state to READY by default */ diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c index 491efc1bf5c4..263385b75bba 100644 --- a/drivers/net/phy/phy_led_triggers.c +++ b/drivers/net/phy/phy_led_triggers.c @@ -67,7 +67,7 @@ void phy_led_trigger_change_speed(struct phy_device *phy) EXPORT_SYMBOL_GPL(phy_led_trigger_change_speed); static void phy_led_trigger_format_name(struct phy_device *phy, char *buf, - size_t size, char *suffix) + size_t size, const char *suffix) { snprintf(buf, size, PHY_ID_FMT ":%s", phy->mdio.bus->id, phy->mdio.addr, suffix); @@ -77,20 +77,9 @@ static int phy_led_trigger_register(struct phy_device *phy, struct phy_led_trigger *plt, unsigned int speed) { - char name_suffix[PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE]; - plt->speed = speed; - - if (speed < SPEED_1000) - snprintf(name_suffix, sizeof(name_suffix), "%dMbps", speed); - else if (speed == SPEED_2500) - snprintf(name_suffix, sizeof(name_suffix), "2.5Gbps"); - else - snprintf(name_suffix, sizeof(name_suffix), "%dGbps", - DIV_ROUND_CLOSEST(speed, 1000)); - phy_led_trigger_format_name(phy, plt->name, sizeof(plt->name), - name_suffix); + phy_speed_to_str(speed)); plt->trigger.name = plt->name; return led_trigger_register(&plt->trigger); diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 9b8dd0d0ee42..e7becc7379d7 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -191,8 +191,7 @@ static int phylink_parse_fixedlink(struct phylink *pl, phylink_validate(pl, pl->supported, &pl->link_config); s = phy_lookup_setting(pl->link_config.speed, pl->link_config.duplex, - pl->supported, - __ETHTOOL_LINK_MODE_MASK_NBITS, true); + pl->supported, true); linkmode_zero(pl->supported); phylink_set(pl->supported, MII); if (s) { @@ -634,13 +633,11 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) { struct phylink_link_state config; __ETHTOOL_DECLARE_LINK_MODE_MASK(supported); - u32 advertising; int ret; memset(&config, 0, sizeof(config)); - ethtool_convert_legacy_u32_to_link_mode(supported, phy->supported); - ethtool_convert_legacy_u32_to_link_mode(config.advertising, - phy->advertising); + linkmode_copy(supported, phy->supported); + linkmode_copy(config.advertising, phy->advertising); config.interface = pl->link_config.interface; /* @@ -673,15 +670,14 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy) linkmode_copy(pl->link_config.advertising, config.advertising); /* Restrict the 
phy advertisement according to the MAC support. */ - ethtool_convert_link_mode_to_legacy_u32(&advertising, config.advertising); - phy->advertising = advertising; + linkmode_copy(phy->advertising, config.advertising); mutex_unlock(&pl->state_mutex); mutex_unlock(&phy->lock); netdev_dbg(pl->netdev, - "phy: setting supported %*pb advertising 0x%08x\n", + "phy: setting supported %*pb advertising %*pb\n", __ETHTOOL_LINK_MODE_MASK_NBITS, pl->supported, - phy->advertising); + __ETHTOOL_LINK_MODE_MASK_NBITS, phy->advertising); phy_start_machine(phy); if (phy->irq > 0) @@ -1088,8 +1084,7 @@ int phylink_ethtool_ksettings_set(struct phylink *pl, * duplex. */ s = phy_lookup_setting(kset->base.speed, kset->base.duplex, - pl->supported, - __ETHTOOL_LINK_MODE_MASK_NBITS, false); + pl->supported, false); if (!s) return -EINVAL; diff --git a/drivers/net/phy/qsemi.c b/drivers/net/phy/qsemi.c index 889a4dce1648..cfe2313dbefd 100644 --- a/drivers/net/phy/qsemi.c +++ b/drivers/net/phy/qsemi.c @@ -116,7 +116,6 @@ static struct phy_driver qs6612_driver[] = { { .name = "QS6612", .phy_id_mask = 0xfffffff0, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = qs6612_config_init, .ack_interrupt = qs6612_ack_interrupt, .config_intr = qs6612_config_intr, diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c index 271e8adc39f1..c6010fb1aa0f 100644 --- a/drivers/net/phy/realtek.c +++ b/drivers/net/phy/realtek.c @@ -213,17 +213,13 @@ static int rtl8366rb_config_init(struct phy_device *phydev) static struct phy_driver realtek_drvs[] = { { - .phy_id = 0x00008201, + PHY_ID_MATCH_EXACT(0x00008201), .name = "RTL8201CP Ethernet", - .phy_id_mask = 0x0000ffff, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, }, { - .phy_id = 0x001cc816, + PHY_ID_MATCH_EXACT(0x001cc816), .name = "RTL8201F Fast Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl8201_ack_interrupt, .config_intr = &rtl8201_config_intr, .suspend = genphy_suspend, @@ -231,19 +227,16 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - .phy_id = 0x001cc910, + PHY_ID_MATCH_EXACT(0x001cc910), .name = "RTL8211 Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .config_aneg = rtl8211_config_aneg, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, }, { - .phy_id = 0x001cc912, + PHY_ID_MATCH_EXACT(0x001cc912), .name = "RTL8211B Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211b_config_intr, .read_mmd = &genphy_read_mmd_unsupported, @@ -251,39 +244,32 @@ static struct phy_driver realtek_drvs[] = { .suspend = rtl8211b_suspend, .resume = rtl8211b_resume, }, { - .phy_id = 0x001cc913, + PHY_ID_MATCH_EXACT(0x001cc913), .name = "RTL8211C Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, .config_init = rtl8211c_config_init, .read_mmd = &genphy_read_mmd_unsupported, .write_mmd = &genphy_write_mmd_unsupported, }, { - .phy_id = 0x001cc914, + PHY_ID_MATCH_EXACT(0x001cc914), .name = "RTL8211DN Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = rtl821x_ack_interrupt, .config_intr = rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = 0x001cc915, + PHY_ID_MATCH_EXACT(0x001cc915), 
.name = "RTL8211E Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .ack_interrupt = &rtl821x_ack_interrupt, .config_intr = &rtl8211e_config_intr, .suspend = genphy_suspend, .resume = genphy_resume, }, { - .phy_id = 0x001cc916, + PHY_ID_MATCH_EXACT(0x001cc916), .name = "RTL8211F Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &rtl8211f_config_init, .ack_interrupt = &rtl8211f_ack_interrupt, .config_intr = &rtl8211f_config_intr, @@ -292,11 +278,9 @@ static struct phy_driver realtek_drvs[] = { .read_page = rtl821x_read_page, .write_page = rtl821x_write_page, }, { - .phy_id = 0x001cc961, + PHY_ID_MATCH_EXACT(0x001cc961), .name = "RTL8366RB Gigabit Ethernet", - .phy_id_mask = 0x001fffff, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &rtl8366rb_config_init, .suspend = genphy_suspend, .resume = genphy_resume, @@ -305,15 +289,8 @@ static struct phy_driver realtek_drvs[] = { module_phy_driver(realtek_drvs); -static struct mdio_device_id __maybe_unused realtek_tbl[] = { - { 0x001cc816, 0x001fffff }, - { 0x001cc910, 0x001fffff }, - { 0x001cc912, 0x001fffff }, - { 0x001cc913, 0x001fffff }, - { 0x001cc914, 0x001fffff }, - { 0x001cc915, 0x001fffff }, - { 0x001cc916, 0x001fffff }, - { 0x001cc961, 0x001fffff }, +static const struct mdio_device_id __maybe_unused realtek_tbl[] = { + { PHY_ID_MATCH_VENDOR(0x001cc800) }, { } }; diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index c328208388da..f9477ff55545 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -219,7 +219,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN83C185", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -239,7 +238,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8187", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -264,7 +262,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8700", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -290,7 +287,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN911x Internal PHY", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, @@ -309,7 +305,7 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8710/LAN8720", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT | PHY_RST_AFTER_CLK_EN, + .flags = PHY_RST_AFTER_CLK_EN, .probe = smsc_phy_probe, @@ -335,7 +331,6 @@ static struct phy_driver smsc_phy_driver[] = { .name = "SMSC LAN8740", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .probe = smsc_phy_probe, diff --git a/drivers/net/phy/ste10Xp.c b/drivers/net/phy/ste10Xp.c index 2fe9a87b55b5..33d733684f5b 100644 --- a/drivers/net/phy/ste10Xp.c +++ b/drivers/net/phy/ste10Xp.c @@ -87,7 +87,6 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id_mask = 0xfffffff0, .name = "STe101p", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, @@ -98,7 +97,6 @@ static struct phy_driver ste10xp_pdriver[] = { .phy_id_mask = 0xffffffff, .name = "STe100p", .features = PHY_BASIC_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = ste10Xp_config_init, .ack_interrupt = ste10Xp_ack_interrupt, .config_intr = ste10Xp_config_intr, diff --git 
a/drivers/net/phy/uPD60620.c b/drivers/net/phy/uPD60620.c index 55f48ee3595a..1e4fc42e4629 100644 --- a/drivers/net/phy/uPD60620.c +++ b/drivers/net/phy/uPD60620.c @@ -47,7 +47,7 @@ static int upd60620_read_status(struct phy_device *phydev) return phy_state; phydev->link = 0; - phydev->lp_advertising = 0; + linkmode_zero(phydev->lp_advertising); phydev->pause = 0; phydev->asym_pause = 0; @@ -70,8 +70,8 @@ static int upd60620_read_status(struct phy_device *phydev) if (phy_state < 0) return phy_state; - phydev->lp_advertising - = mii_lpa_to_ethtool_lpa_t(phy_state); + mii_lpa_to_linkmode_lpa_t(phydev->lp_advertising, + phy_state); if (phydev->duplex == DUPLEX_FULL) { if (phy_state & LPA_PAUSE_CAP) diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c index fbf9ad429593..0646af458f6a 100644 --- a/drivers/net/phy/vitesse.c +++ b/drivers/net/phy/vitesse.c @@ -70,7 +70,6 @@ #define PHY_ID_VSC8244 0x000fc6c0 #define PHY_ID_VSC8514 0x00070670 #define PHY_ID_VSC8572 0x000704d0 -#define PHY_ID_VSC8574 0x000704a0 #define PHY_ID_VSC8601 0x00070420 #define PHY_ID_VSC7385 0x00070450 #define PHY_ID_VSC7388 0x00070480 @@ -303,7 +302,6 @@ static int vsc82xx_config_intr(struct phy_device *phydev) phydev->drv->phy_id == PHY_ID_VSC8244 || phydev->drv->phy_id == PHY_ID_VSC8514 || phydev->drv->phy_id == PHY_ID_VSC8572 || - phydev->drv->phy_id == PHY_ID_VSC8574 || phydev->drv->phy_id == PHY_ID_VSC8601) ? MII_VSC8244_IMASK_MASK : MII_VSC8221_IMASK_MASK); @@ -399,7 +397,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8234", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -409,7 +406,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8244", .phy_id_mask = 0x000fffc0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -419,7 +415,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8514", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -429,17 +424,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8572", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, - .config_init = &vsc824x_config_init, - .config_aneg = &vsc82x4_config_aneg, - .ack_interrupt = &vsc824x_ack_interrupt, - .config_intr = &vsc82xx_config_intr, -}, { - .phy_id = PHY_ID_VSC8574, - .name = "Vitesse VSC8574", - .phy_id_mask = 0x000ffff0, - .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = &vsc824x_ack_interrupt, @@ -449,7 +433,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8601", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8601_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -494,7 +477,6 @@ static struct phy_driver vsc82xx_driver[] = { .name = "Vitesse VSC8662", .phy_id_mask = 0x000ffff0, .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc824x_config_init, .config_aneg = &vsc82x4_config_aneg, .ack_interrupt = 
&vsc824x_ack_interrupt, @@ -505,7 +487,6 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id_mask = 0x000ffff0, .name = "Vitesse VSC8221", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8221_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -515,7 +496,6 @@ static struct phy_driver vsc82xx_driver[] = { .phy_id_mask = 0x000ffff0, .name = "Vitesse VSC8211", .features = PHY_GBIT_FEATURES, - .flags = PHY_HAS_INTERRUPT, .config_init = &vsc8221_config_init, .ack_interrupt = &vsc824x_ack_interrupt, .config_intr = &vsc82xx_config_intr, @@ -528,7 +508,6 @@ static struct mdio_device_id __maybe_unused vitesse_tbl[] = { { PHY_ID_VSC8244, 0x000fffc0 }, { PHY_ID_VSC8514, 0x000ffff0 }, { PHY_ID_VSC8572, 0x000ffff0 }, - { PHY_ID_VSC8574, 0x000ffff0 }, { PHY_ID_VSC7385, 0x000ffff0 }, { PHY_ID_VSC7388, 0x000ffff0 }, { PHY_ID_VSC7395, 0x000ffff0 }, diff --git a/drivers/net/ppp/ppp_async.c b/drivers/net/ppp/ppp_async.c index bdc4d23627c5..b287bb811875 100644 --- a/drivers/net/ppp/ppp_async.c +++ b/drivers/net/ppp/ppp_async.c @@ -70,7 +70,7 @@ struct asyncppp { struct tasklet_struct tsk; refcount_t refcnt; - struct semaphore dead_sem; + struct completion dead; struct ppp_channel chan; /* interface to generic ppp layer */ unsigned char obuf[OBUFSIZE]; }; @@ -148,7 +148,7 @@ static struct asyncppp *ap_get(struct tty_struct *tty) static void ap_put(struct asyncppp *ap) { if (refcount_dec_and_test(&ap->refcnt)) - up(&ap->dead_sem); + complete(&ap->dead); } /* @@ -186,7 +186,7 @@ ppp_asynctty_open(struct tty_struct *tty) tasklet_init(&ap->tsk, ppp_async_process, (unsigned long) ap); refcount_set(&ap->refcnt, 1); - sema_init(&ap->dead_sem, 0); + init_completion(&ap->dead); ap->chan.private = ap; ap->chan.ops = &async_ops; @@ -235,7 +235,7 @@ ppp_asynctty_close(struct tty_struct *tty) * by the time it returns. */ if (!refcount_dec_and_test(&ap->refcnt)) - down(&ap->dead_sem); + wait_for_completion(&ap->dead); tasklet_kill(&ap->tsk); ppp_unregister_channel(&ap->chan); @@ -770,7 +770,7 @@ process_input_packet(struct asyncppp *ap) { struct sk_buff *skb; unsigned char *p; - unsigned int len, fcs, proto; + unsigned int len, fcs; skb = ap->rpkt; if (ap->state & (SC_TOSS | SC_ESCAPE)) @@ -799,14 +799,14 @@ process_input_packet(struct asyncppp *ap) goto err; p = skb_pull(skb, 2); } - proto = p[0]; - if (proto & 1) { - /* protocol is compressed */ - *(u8 *)skb_push(skb, 1) = 0; - } else { + + /* If protocol field is not compressed, it can be LCP packet */ + if (!(p[0] & 0x01)) { + unsigned int proto; + if (skb->len < 2) goto err; - proto = (proto << 8) + p[1]; + proto = (p[0] << 8) + p[1]; if (proto == PPP_LCP) async_lcp_peek(ap, p, skb->len, 1); } diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c index 500bc0027c1b..c708400fff4a 100644 --- a/drivers/net/ppp/ppp_generic.c +++ b/drivers/net/ppp/ppp_generic.c @@ -1965,6 +1965,46 @@ ppp_do_recv(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) ppp_recv_unlock(ppp); } +/** + * __ppp_decompress_proto - Decompress protocol field, slim version. + * @skb: Socket buffer where protocol field should be decompressed. It must have + * at least 1 byte of head room and 1 byte of linear data. First byte of + * data must be a protocol field byte. + * + * Decompress protocol field in PPP header if it's compressed, e.g. when + * Protocol-Field-Compression (PFC) was negotiated. No checks w.r.t. skb data + * length are done in this function. 
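+ *
+ * Worked example (an editorial illustration of PFC as defined by RFC 1661,
+ * not text from the original patch): an IPv4 frame whose two-byte protocol
+ * field 0x00 0x21 was compressed arrives with the single odd octet 0x21;
+ * because bit 0 is set, skb_push() restores the leading 0x00 and
+ * PPP_PROTO() again reads 0x0021. An even first octet (e.g. the 0xC0 of
+ * LCP's 0xC0 0x21) is left untouched.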
+ */ +static void __ppp_decompress_proto(struct sk_buff *skb) +{ + if (skb->data[0] & 0x01) + *(u8 *)skb_push(skb, 1) = 0x00; +} + +/** + * ppp_decompress_proto - Check skb data room and decompress protocol field. + * @skb: Socket buffer where protocol field should be decompressed. First byte + * of data must be a protocol field byte. + * + * Decompress protocol field in PPP header if it's compressed, e.g. when + * Protocol-Field-Compression (PFC) was negotiated. This function also makes + * sure that skb data room is sufficient for Protocol field, before and after + * decompression. + * + * Return: true - decompressed successfully, false - not enough room in skb. + */ +static bool ppp_decompress_proto(struct sk_buff *skb) +{ + /* At least one byte should be present (if protocol is compressed) */ + if (!pskb_may_pull(skb, 1)) + return false; + + __ppp_decompress_proto(skb); + + /* Protocol field should occupy 2 bytes when not compressed */ + return pskb_may_pull(skb, 2); +} + void ppp_input(struct ppp_channel *chan, struct sk_buff *skb) { @@ -1977,7 +2017,7 @@ ppp_input(struct ppp_channel *chan, struct sk_buff *skb) } read_lock_bh(&pch->upl); - if (!pskb_may_pull(skb, 2)) { + if (!ppp_decompress_proto(skb)) { kfree_skb(skb); if (pch->ppp) { ++pch->ppp->dev->stats.rx_length_errors; @@ -2074,6 +2114,9 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb) if (ppp->flags & SC_MUST_COMP && ppp->rstate & SC_DC_FERROR) goto err; + /* At this point the "Protocol" field MUST be decompressed, either in + * ppp_input(), ppp_decompress_frame() or in ppp_receive_mp_frame(). + */ proto = PPP_PROTO(skb); switch (proto) { case PPP_VJC_COMP: @@ -2245,6 +2288,9 @@ ppp_decompress_frame(struct ppp *ppp, struct sk_buff *skb) skb_put(skb, len); skb_pull(skb, 2); /* pull off the A/C bytes */ + /* Don't call __ppp_decompress_proto() here, but instead rely on + * corresponding algo (mppe/bsd/deflate) to decompress it. + */ } else { /* Uncompressed frame - pass to decompressor so it can update its dictionary if necessary. */ @@ -2290,9 +2336,11 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch) /* * Do protocol ID decompression on the first fragment of each packet. + * We have to do that here, because ppp_receive_nonmp_frame() expects + * decompressed protocol field. */ - if ((PPP_MP_CB(skb)->BEbits & B) && (skb->data[0] & 1)) - *(u8 *)skb_push(skb, 1) = 0; + if (PPP_MP_CB(skb)->BEbits & B) + __ppp_decompress_proto(skb); /* * Expand sequence number to 32 bits, making it as close diff --git a/drivers/net/ppp/ppp_synctty.c b/drivers/net/ppp/ppp_synctty.c index 047f6c68a441..d02ba2494d93 100644 --- a/drivers/net/ppp/ppp_synctty.c +++ b/drivers/net/ppp/ppp_synctty.c @@ -709,11 +709,10 @@ ppp_sync_input(struct syncppp *ap, const unsigned char *buf, p = skb_pull(skb, 2); } - /* decompress protocol field if compressed */ - if (p[0] & 1) { - /* protocol is compressed */ - *(u8 *)skb_push(skb, 1) = 0; - } else if (skb->len < 2) + /* PPP packet length should be >= 2 bytes when protocol field is not + * compressed. 
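+ * (An uncompressed protocol field always occupies two octets, e.g. LCP's
+ * 0xC0 0x21, so a shorter frame here cannot be valid.)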
+ */ + if (!(p[0] & 0x01) && skb->len < 2) goto err; /* queue the frame to be processed */ diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c index 67ffe74747a1..8f09edd811e9 100644 --- a/drivers/net/ppp/pptp.c +++ b/drivers/net/ppp/pptp.c @@ -325,11 +325,6 @@ allow_packet: skb_pull(skb, 2); } - if ((*skb->data) & 1) { - /* protocol is compressed */ - *(u8 *)skb_push(skb, 1) = 0; - } - skb->ip_summed = CHECKSUM_NONE; skb_set_network_header(skb, skb->head-skb->data); ppp_input(&po->chan, skb); diff --git a/drivers/net/tap.c b/drivers/net/tap.c index f03004f37eca..443b2694130c 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -1113,7 +1113,7 @@ static long tap_ioctl(struct file *file, unsigned int cmd, rtnl_unlock(); return -ENOLINK; } - ret = dev_set_mac_address(tap->dev, &sa); + ret = dev_set_mac_address(tap->dev, &sa, NULL); tap_put_tap_dev(tap); rtnl_unlock(); return ret; diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 364f514d56d8..afd9d25d1992 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -59,7 +59,7 @@ static int __set_port_dev_addr(struct net_device *port_dev, memcpy(addr.__data, dev_addr, port_dev->addr_len); addr.ss_family = port_dev->type; - return dev_set_mac_address(port_dev, (struct sockaddr *)&addr); + return dev_set_mac_address(port_dev, (struct sockaddr *)&addr, NULL); } static int team_port_set_orig_dev_addr(struct team_port *port) @@ -1212,7 +1212,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev, goto err_port_enter; } - err = dev_open(port_dev); + err = dev_open(port_dev, extack); if (err) { netdev_dbg(dev, "Device %s opening failed\n", portname); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 005020042be9..a4fdad475594 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -188,6 +188,11 @@ struct tun_file { struct xdp_rxq_info xdp_rxq; }; +struct tun_page { + struct page *page; + int count; +}; + struct tun_flow_entry { struct hlist_node hash_link; struct rcu_head rcu; @@ -196,7 +201,7 @@ struct tun_flow_entry { u32 rxhash; u32 rps_rxhash; int queue_index; - unsigned long updated; + unsigned long updated ____cacheline_aligned_in_smp; }; #define TUN_NUM_FLOW_ENTRIES 1024 @@ -524,18 +529,17 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash, unsigned long delay = tun->ageing_time; u16 queue_index = tfile->queue_index; - if (!rxhash) - return; - else - head = &tun->flows[tun_hashfn(rxhash)]; + head = &tun->flows[tun_hashfn(rxhash)]; rcu_read_lock(); e = tun_flow_find(head, rxhash); if (likely(e)) { /* TODO: keep queueing to old queue until it's empty? 
*/ - e->queue_index = queue_index; - e->updated = jiffies; + if (e->queue_index != queue_index) + e->queue_index = queue_index; + if (e->updated != jiffies) + e->updated = jiffies; sock_rps_record_flow_hash(e->rps_rxhash); } else { spin_lock_bh(&tun->lock); @@ -1249,6 +1253,21 @@ static int tun_xdp(struct net_device *dev, struct netdev_bpf *xdp) } } +static int tun_net_change_carrier(struct net_device *dev, bool new_carrier) +{ + if (new_carrier) { + struct tun_struct *tun = netdev_priv(dev); + + if (!tun->numqueues) + return -EPERM; + + netif_carrier_on(dev); + } else { + netif_carrier_off(dev); + } + return 0; +} + static const struct net_device_ops tun_netdev_ops = { .ndo_uninit = tun_net_uninit, .ndo_open = tun_net_open, @@ -1258,6 +1277,7 @@ static const struct net_device_ops tun_netdev_ops = { .ndo_select_queue = tun_select_queue, .ndo_set_rx_headroom = tun_set_headroom, .ndo_get_stats64 = tun_net_get_stats64, + .ndo_change_carrier = tun_net_change_carrier, }; static void __tun_xdp_flush_tfile(struct tun_file *tfile) @@ -1340,6 +1360,7 @@ static const struct net_device_ops tap_netdev_ops = { .ndo_get_stats64 = tun_net_get_stats64, .ndo_bpf = tun_xdp, .ndo_xdp_xmit = tun_xdp_xmit, + .ndo_change_carrier = tun_net_change_carrier, }; static void tun_flow_init(struct tun_struct *tun) @@ -1473,23 +1494,22 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, skb->truesize += skb->data_len; for (i = 1; i < it->nr_segs; i++) { - struct page_frag *pfrag = &current->task_frag; size_t fragsz = it->iov[i].iov_len; + struct page *page; + void *frag; if (fragsz == 0 || fragsz > PAGE_SIZE) { err = -EINVAL; goto free; } - - if (!skb_page_frag_refill(fragsz, pfrag, GFP_KERNEL)) { + frag = netdev_alloc_frag(fragsz); + if (!frag) { err = -ENOMEM; goto free; } - - skb_fill_page_desc(skb, i - 1, pfrag->page, - pfrag->offset, fragsz); - page_ref_inc(pfrag->page); - pfrag->offset += fragsz; + page = virt_to_head_page(frag); + skb_fill_page_desc(skb, i - 1, page, + frag - page_address(page), fragsz); } return skb; @@ -2381,9 +2401,16 @@ static void tun_sock_write_space(struct sock *sk) kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); } +static void tun_put_page(struct tun_page *tpage) +{ + if (tpage->page) + __page_frag_cache_drain(tpage->page, tpage->count); +} + static int tun_xdp_one(struct tun_struct *tun, struct tun_file *tfile, - struct xdp_buff *xdp, int *flush) + struct xdp_buff *xdp, int *flush, + struct tun_page *tpage) { unsigned int datasize = xdp->data_end - xdp->data; struct tun_xdp_hdr *hdr = xdp->data_hard_start; @@ -2395,6 +2422,7 @@ static int tun_xdp_one(struct tun_struct *tun, int buflen = hdr->buflen; int err = 0; bool skb_xdp = false; + struct page *page; xdp_prog = rcu_dereference(tun->xdp_prog); if (xdp_prog) { @@ -2421,7 +2449,14 @@ static int tun_xdp_one(struct tun_struct *tun, case XDP_PASS: break; default: - put_page(virt_to_head_page(xdp->data)); + page = virt_to_head_page(xdp->data); + if (tpage->page == page) { + ++tpage->count; + } else { + tun_put_page(tpage); + tpage->page = page; + tpage->count = 1; + } return 0; } } @@ -2453,18 +2488,21 @@ build: goto out; } - if (!rcu_dereference(tun->steering_prog)) + if (!rcu_dereference(tun->steering_prog) && tun->numqueues > 1 && + !tfile->detached) rxhash = __skb_get_hash_symmetric(skb); skb_record_rx_queue(skb, tfile->queue_index); netif_receive_skb(skb); - stats = get_cpu_ptr(tun->pcpu_stats); + /* No need for get_cpu_ptr() here since this function is + * always called with bh disabled + */ + stats = 
this_cpu_ptr(tun->pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; stats->rx_bytes += datasize; u64_stats_update_end(&stats->syncp); - put_cpu_ptr(stats); if (rxhash) tun_flow_update(tun, rxhash, tfile); @@ -2485,15 +2523,18 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) return -EBADFD; if (ctl && (ctl->type == TUN_MSG_PTR)) { + struct tun_page tpage; int n = ctl->num; int flush = 0; + memset(&tpage, 0, sizeof(tpage)); + local_bh_disable(); rcu_read_lock(); for (i = 0; i < n; i++) { xdp = &((struct xdp_buff *)ctl->ptr)[i]; - tun_xdp_one(tun, tfile, xdp, &flush); + tun_xdp_one(tun, tfile, xdp, &flush, &tpage); } if (flush) @@ -2502,6 +2543,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len) rcu_read_unlock(); local_bh_enable(); + tun_put_page(&tpage); + ret = total_len; goto out; } @@ -2978,12 +3021,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, struct net *net = sock_net(&tfile->sk); struct tun_struct *tun; void __user* argp = (void __user*)arg; + unsigned int ifindex, carrier; struct ifreq ifr; kuid_t owner; kgid_t group; int sndbuf; int vnet_hdr_sz; - unsigned int ifindex; int le; int ret; bool do_notify = false; @@ -3161,7 +3204,7 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", ifr.ifr_hwaddr.sa_data); - ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); + ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr, NULL); break; case TUNGETSNDBUF: @@ -3267,6 +3310,14 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd, ret = tun_set_ebpf(tun, &tun->filter_prog, argp); break; + case TUNSETCARRIER: + ret = -EFAULT; + if (copy_from_user(&carrier, argp, sizeof(carrier))) + goto unlock; + + ret = tun_net_change_carrier(tun->dev, (bool)carrier); + break; + default: ret = -EINVAL; break; diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig index 418b0904cecb..860352a525fb 100644 --- a/drivers/net/usb/Kconfig +++ b/drivers/net/usb/Kconfig @@ -613,4 +613,15 @@ config USB_NET_CH9200 To compile this driver as a module, choose M here: the module will be called ch9200. +config USB_NET_AQC111 + tristate "Aquantia AQtion USB to 5/2.5GbE Controllers support" + depends on USB_USBNET + select CRC32 + help + This option adds support for Aquantia AQtion USB + Ethernet adapters based on AQC111U/AQC112 chips. + + This driver should work with at least the following devices: + * Aquantia AQtion USB to 5GbE + endif # USB_NET_DRIVERS diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 27307a4ab003..99fd12be2111 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -40,3 +40,4 @@ obj-$(CONFIG_USB_VL600) += lg-vl600.o obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o obj-$(CONFIG_USB_NET_CH9200) += ch9200.o +obj-$(CONFIG_USB_NET_AQC111) += aqc111.o diff --git a/drivers/net/usb/aqc111.c b/drivers/net/usb/aqc111.c new file mode 100644 index 000000000000..57f1c94fca0b --- /dev/null +++ b/drivers/net/usb/aqc111.c @@ -0,0 +1,1459 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Aquantia Corp. Aquantia AQtion USB to 5GbE Controller + * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> + * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> + * Copyright (C) 2002-2003 TiVo Inc. + * Copyright (C) 2017-2018 ASIX + * Copyright (C) 2018 Aquantia Corp. 
+ */ + +#include <linux/module.h> +#include <linux/netdevice.h> +#include <linux/ethtool.h> +#include <linux/mii.h> +#include <linux/usb.h> +#include <linux/crc32.h> +#include <linux/if_vlan.h> +#include <linux/usb/cdc.h> +#include <linux/usb/usbnet.h> +#include <linux/linkmode.h> + +#include "aqc111.h" + +#define DRIVER_NAME "aqc111" + +static int aqc111_read_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 size, void *data) +{ + int ret; + + ret = usbnet_read_cmd_nopm(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, data, size); + + if (unlikely(ret < 0)) + netdev_warn(dev->net, + "Failed to read(0x%x) reg index 0x%04x: %d\n", + cmd, index, ret); + + return ret; +} + +static int aqc111_read_cmd(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 size, void *data) +{ + int ret; + + ret = usbnet_read_cmd(dev, cmd, USB_DIR_IN | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, data, size); + + if (unlikely(ret < 0)) + netdev_warn(dev->net, + "Failed to read(0x%x) reg index 0x%04x: %d\n", + cmd, index, ret); + + return ret; +} + +static int aqc111_read16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 *data) +{ + int ret = 0; + + ret = aqc111_read_cmd_nopm(dev, cmd, value, index, sizeof(*data), data); + le16_to_cpus(data); + + return ret; +} + +static int aqc111_read16_cmd(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 *data) +{ + int ret = 0; + + ret = aqc111_read_cmd(dev, cmd, value, index, sizeof(*data), data); + le16_to_cpus(data); + + return ret; +} + +static int __aqc111_write_cmd(struct usbnet *dev, u8 cmd, u8 reqtype, + u16 value, u16 index, u16 size, const void *data) +{ + int err = -ENOMEM; + void *buf = NULL; + + netdev_dbg(dev->net, + "%s cmd=%#x reqtype=%#x value=%#x index=%#x size=%d\n", + __func__, cmd, reqtype, value, index, size); + + if (data) { + buf = kmemdup(data, size, GFP_KERNEL); + if (!buf) + goto out; + } + + err = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), + cmd, reqtype, value, index, buf, size, + (cmd == AQ_PHY_POWER) ? 
AQ_USB_PHY_SET_TIMEOUT : + AQ_USB_SET_TIMEOUT); + + if (unlikely(err < 0)) + netdev_warn(dev->net, + "Failed to write(0x%x) reg index 0x%04x: %d\n", + cmd, index, err); + kfree(buf); + +out: + return err; +} + +static int aqc111_write_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 size, void *data) +{ + int ret; + + ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, size, data); + + return ret; +} + +static int aqc111_write_cmd(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 size, void *data) +{ + int ret; + + if (usb_autopm_get_interface(dev->intf) < 0) + return -ENODEV; + + ret = __aqc111_write_cmd(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, size, data); + + usb_autopm_put_interface(dev->intf); + + return ret; +} + +static int aqc111_write16_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 *data) +{ + u16 tmp = *data; + + cpu_to_le16s(&tmp); + + return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp); +} + +static int aqc111_write16_cmd(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 *data) +{ + u16 tmp = *data; + + cpu_to_le16s(&tmp); + + return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp); +} + +static int aqc111_write32_cmd_nopm(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u32 *data) +{ + u32 tmp = *data; + + cpu_to_le32s(&tmp); + + return aqc111_write_cmd_nopm(dev, cmd, value, index, sizeof(tmp), &tmp); +} + +static int aqc111_write32_cmd(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u32 *data) +{ + u32 tmp = *data; + + cpu_to_le32s(&tmp); + + return aqc111_write_cmd(dev, cmd, value, index, sizeof(tmp), &tmp); +} + +static int aqc111_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 size, void *data) +{ + return usbnet_write_cmd_async(dev, cmd, USB_DIR_OUT | USB_TYPE_VENDOR | + USB_RECIP_DEVICE, value, index, data, + size); +} + +static int aqc111_write16_cmd_async(struct usbnet *dev, u8 cmd, u16 value, + u16 index, u16 *data) +{ + u16 tmp = *data; + + cpu_to_le16s(&tmp); + + return aqc111_write_cmd_async(dev, cmd, value, index, + sizeof(tmp), &tmp); +} + +static void aqc111_get_drvinfo(struct net_device *net, + struct ethtool_drvinfo *info) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + + /* Inherit standard device info */ + usbnet_get_drvinfo(net, info); + strlcpy(info->driver, DRIVER_NAME, sizeof(info->driver)); + snprintf(info->fw_version, sizeof(info->fw_version), "%u.%u.%u", + aqc111_data->fw_ver.major, + aqc111_data->fw_ver.minor, + aqc111_data->fw_ver.rev); + info->eedump_len = 0x00; + info->regdump_len = 0x00; +} + +static void aqc111_get_wol(struct net_device *net, + struct ethtool_wolinfo *wolinfo) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + + wolinfo->supported = WAKE_MAGIC; + wolinfo->wolopts = 0; + + if (aqc111_data->wol_flags & AQ_WOL_FLAG_MP) + wolinfo->wolopts |= WAKE_MAGIC; +} + +static int aqc111_set_wol(struct net_device *net, + struct ethtool_wolinfo *wolinfo) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + + if (wolinfo->wolopts & ~WAKE_MAGIC) + return -EINVAL; + + aqc111_data->wol_flags = 0; + if (wolinfo->wolopts & WAKE_MAGIC) + aqc111_data->wol_flags |= AQ_WOL_FLAG_MP; + + return 0; +} + +static void aqc111_speed_to_link_mode(u32 speed, + struct ethtool_link_ksettings *elk) +{ + switch (speed) { + case 
SPEED_5000: + ethtool_link_ksettings_add_link_mode(elk, advertising, + 5000baseT_Full); + break; + case SPEED_2500: + ethtool_link_ksettings_add_link_mode(elk, advertising, + 2500baseT_Full); + break; + case SPEED_1000: + ethtool_link_ksettings_add_link_mode(elk, advertising, + 1000baseT_Full); + break; + case SPEED_100: + ethtool_link_ksettings_add_link_mode(elk, advertising, + 100baseT_Full); + break; + } +} + +static int aqc111_get_link_ksettings(struct net_device *net, + struct ethtool_link_ksettings *elk) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + enum usb_device_speed usb_speed = dev->udev->speed; + u32 speed = SPEED_UNKNOWN; + + ethtool_link_ksettings_zero_link_mode(elk, supported); + ethtool_link_ksettings_add_link_mode(elk, supported, + 100baseT_Full); + ethtool_link_ksettings_add_link_mode(elk, supported, + 1000baseT_Full); + if (usb_speed == USB_SPEED_SUPER) { + ethtool_link_ksettings_add_link_mode(elk, supported, + 2500baseT_Full); + ethtool_link_ksettings_add_link_mode(elk, supported, + 5000baseT_Full); + } + ethtool_link_ksettings_add_link_mode(elk, supported, TP); + ethtool_link_ksettings_add_link_mode(elk, supported, Autoneg); + + elk->base.port = PORT_TP; + elk->base.transceiver = XCVR_INTERNAL; + + elk->base.mdio_support = 0x00; /*Not supported*/ + + if (aqc111_data->autoneg) + linkmode_copy(elk->link_modes.advertising, + elk->link_modes.supported); + else + aqc111_speed_to_link_mode(aqc111_data->advertised_speed, elk); + + elk->base.autoneg = aqc111_data->autoneg; + + switch (aqc111_data->link_speed) { + case AQ_INT_SPEED_5G: + speed = SPEED_5000; + break; + case AQ_INT_SPEED_2_5G: + speed = SPEED_2500; + break; + case AQ_INT_SPEED_1G: + speed = SPEED_1000; + break; + case AQ_INT_SPEED_100M: + speed = SPEED_100; + break; + } + elk->base.duplex = DUPLEX_FULL; + elk->base.speed = speed; + + return 0; +} + +static void aqc111_set_phy_speed(struct usbnet *dev, u8 autoneg, u16 speed) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + + aqc111_data->phy_cfg &= ~AQ_ADV_MASK; + aqc111_data->phy_cfg |= AQ_PAUSE; + aqc111_data->phy_cfg |= AQ_ASYM_PAUSE; + aqc111_data->phy_cfg |= AQ_DOWNSHIFT; + aqc111_data->phy_cfg &= ~AQ_DSH_RETRIES_MASK; + aqc111_data->phy_cfg |= (3 << AQ_DSH_RETRIES_SHIFT) & + AQ_DSH_RETRIES_MASK; + + if (autoneg == AUTONEG_ENABLE) { + switch (speed) { + case SPEED_5000: + aqc111_data->phy_cfg |= AQ_ADV_5G; + /* fall-through */ + case SPEED_2500: + aqc111_data->phy_cfg |= AQ_ADV_2G5; + /* fall-through */ + case SPEED_1000: + aqc111_data->phy_cfg |= AQ_ADV_1G; + /* fall-through */ + case SPEED_100: + aqc111_data->phy_cfg |= AQ_ADV_100M; + /* fall-through */ + } + } else { + switch (speed) { + case SPEED_5000: + aqc111_data->phy_cfg |= AQ_ADV_5G; + break; + case SPEED_2500: + aqc111_data->phy_cfg |= AQ_ADV_2G5; + break; + case SPEED_1000: + aqc111_data->phy_cfg |= AQ_ADV_1G; + break; + case SPEED_100: + aqc111_data->phy_cfg |= AQ_ADV_100M; + break; + } + } + + aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, &aqc111_data->phy_cfg); +} + +static int aqc111_set_link_ksettings(struct net_device *net, + const struct ethtool_link_ksettings *elk) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + enum usb_device_speed usb_speed = dev->udev->speed; + u8 autoneg = elk->base.autoneg; + u32 speed = elk->base.speed; + + if (autoneg == AUTONEG_ENABLE) { + if (aqc111_data->autoneg != AUTONEG_ENABLE) { + aqc111_data->autoneg = AUTONEG_ENABLE; + 
aqc111_data->advertised_speed = + (usb_speed == USB_SPEED_SUPER) ? + SPEED_5000 : SPEED_1000; + aqc111_set_phy_speed(dev, aqc111_data->autoneg, + aqc111_data->advertised_speed); + } + } else { + if (speed != SPEED_100 && + speed != SPEED_1000 && + speed != SPEED_2500 && + speed != SPEED_5000 && + speed != SPEED_UNKNOWN) + return -EINVAL; + + if (elk->base.duplex != DUPLEX_FULL) + return -EINVAL; + + if (usb_speed != USB_SPEED_SUPER && speed > SPEED_1000) + return -EINVAL; + + aqc111_data->autoneg = AUTONEG_DISABLE; + if (speed != SPEED_UNKNOWN) + aqc111_data->advertised_speed = speed; + + aqc111_set_phy_speed(dev, aqc111_data->autoneg, + aqc111_data->advertised_speed); + } + + return 0; +} + +static const struct ethtool_ops aqc111_ethtool_ops = { + .get_drvinfo = aqc111_get_drvinfo, + .get_wol = aqc111_get_wol, + .set_wol = aqc111_set_wol, + .get_msglevel = usbnet_get_msglevel, + .set_msglevel = usbnet_set_msglevel, + .get_link = ethtool_op_get_link, + .get_link_ksettings = aqc111_get_link_ksettings, + .set_link_ksettings = aqc111_set_link_ksettings +}; + +static int aqc111_change_mtu(struct net_device *net, int new_mtu) +{ + struct usbnet *dev = netdev_priv(net); + u16 reg16 = 0; + u8 buf[5]; + + net->mtu = new_mtu; + dev->hard_mtu = net->mtu + net->hard_header_len; + + aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + if (net->mtu > 1500) + reg16 |= SFR_MEDIUM_JUMBO_EN; + else + reg16 &= ~SFR_MEDIUM_JUMBO_EN; + + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + + if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) { + memcpy(buf, &AQC111_BULKIN_SIZE[2], 5); + /* RX bulk configuration */ + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, + 5, 5, buf); + } + + /* Set high low water level */ + if (dev->net->mtu <= 4500) + reg16 = 0x0810; + else if (dev->net->mtu <= 9500) + reg16 = 0x1020; + else if (dev->net->mtu <= 12500) + reg16 = 0x1420; + else if (dev->net->mtu <= 16334) + reg16 = 0x1A20; + + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, + 2, &reg16); + + return 0; +} + +static int aqc111_set_mac_addr(struct net_device *net, void *p) +{ + struct usbnet *dev = netdev_priv(net); + int ret = 0; + + ret = eth_mac_addr(net, p); + if (ret < 0) + return ret; + + /* Set the MAC address */ + return aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, + ETH_ALEN, net->dev_addr); +} + +static int aqc111_vlan_rx_kill_vid(struct net_device *net, + __be16 proto, u16 vid) +{ + struct usbnet *dev = netdev_priv(net); + u8 vlan_ctrl = 0; + u16 reg16 = 0; + u8 reg8 = 0; + + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + vlan_ctrl = reg8; + + /* Address */ + reg8 = (vid / 16); + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); + /* Data */ + reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); + reg16 &= ~(1 << (vid % 16)); + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); + reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + + return 0; +}
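A worked example of the filter addressing used by the two VLAN helpers here (an editorial sketch inferred from the register writes, not part of the patch): the hardware table is 256 16-bit words, so word = vid / 16 and bit = vid % 16; VID 100 therefore lives in word 6, bit 4 (mask 0x0010).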
+static int aqc111_vlan_rx_add_vid(struct net_device *net, __be16 proto, u16 vid) +{ + struct usbnet *dev = netdev_priv(net); + u8 vlan_ctrl = 0; + u16 reg16 = 0; + u8 reg8 = 0; + + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + vlan_ctrl = reg8; + + /* Address */ + reg8 = (vid / 16); + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_ADDRESS, 1, 1, &reg8); + /* Data */ + reg8 = vlan_ctrl | SFR_VLAN_CONTROL_RD; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); + reg16 |= (1 << (vid % 16)); + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_DATA0, 2, &reg16); + reg8 = vlan_ctrl | SFR_VLAN_CONTROL_WE; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + + return 0; +} + +static void aqc111_set_rx_mode(struct net_device *net) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + int mc_count = 0; + + mc_count = netdev_mc_count(net); + + aqc111_data->rxctl &= ~(SFR_RX_CTL_PRO | SFR_RX_CTL_AMALL | + SFR_RX_CTL_AM); + + if (net->flags & IFF_PROMISC) { + aqc111_data->rxctl |= SFR_RX_CTL_PRO; + } else if ((net->flags & IFF_ALLMULTI) || mc_count > AQ_MAX_MCAST) { + aqc111_data->rxctl |= SFR_RX_CTL_AMALL; + } else if (!netdev_mc_empty(net)) { + u8 m_filter[AQ_MCAST_FILTER_SIZE] = { 0 }; + struct netdev_hw_addr *ha = NULL; + u32 crc_bits = 0; + + netdev_for_each_mc_addr(ha, net) { + crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; + m_filter[crc_bits >> 3] |= BIT(crc_bits & 7); + } + + aqc111_write_cmd_async(dev, AQ_ACCESS_MAC, + SFR_MULTI_FILTER_ARRY, + AQ_MCAST_FILTER_SIZE, + AQ_MCAST_FILTER_SIZE, m_filter); + + aqc111_data->rxctl |= SFR_RX_CTL_AM; + } + + aqc111_write16_cmd_async(dev, AQ_ACCESS_MAC, SFR_RX_CTL, + 2, &aqc111_data->rxctl); +} + +static int aqc111_set_features(struct net_device *net, + netdev_features_t features) +{ + struct usbnet *dev = netdev_priv(net); + struct aqc111_data *aqc111_data = dev->driver_priv; + netdev_features_t changed = net->features ^ features; + u16 reg16 = 0; + u8 reg8 = 0; + + if (changed & NETIF_F_IP_CSUM) { + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); + reg8 ^= SFR_TXCOE_TCP | SFR_TXCOE_UDP; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, + 1, 1, &reg8); + } + + if (changed & NETIF_F_IPV6_CSUM) { + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); + reg8 ^= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, + 1, 1, &reg8); + } + + if (changed & NETIF_F_RXCSUM) { + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); + if (features & NETIF_F_RXCSUM) { + aqc111_data->rx_checksum = 1; + reg8 &= ~(SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | + SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6); + } else { + aqc111_data->rx_checksum = 0; + reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | + SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6; + } + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, + 1, 1, &reg8); + } + if (changed & NETIF_F_HW_VLAN_CTAG_FILTER) { + if (features & NETIF_F_HW_VLAN_CTAG_FILTER) { + u16 i = 0; + + for (i = 0; i < 256; i++) { + /* Address */ + reg8 = i; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, + SFR_VLAN_ID_ADDRESS, + 1, 1, &reg8); + /* Data */ + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, + SFR_VLAN_ID_DATA0, + 2, &reg16); + reg8 = SFR_VLAN_CONTROL_WE; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, + SFR_VLAN_ID_CONTROL, + 1, 1, &reg8); + } + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, + 1, 1, &reg8); + reg8 |= SFR_VLAN_CONTROL_VFE; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, + SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + } else { + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, + 1, 1, &reg8); + reg8 &= ~SFR_VLAN_CONTROL_VFE; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, + SFR_VLAN_ID_CONTROL, 1, 1, &reg8); + } + } + + return 0; +} + +static const struct net_device_ops
aqc111_netdev_ops = { + .ndo_open = usbnet_open, + .ndo_stop = usbnet_stop, + .ndo_start_xmit = usbnet_start_xmit, + .ndo_tx_timeout = usbnet_tx_timeout, + .ndo_get_stats64 = usbnet_get_stats64, + .ndo_change_mtu = aqc111_change_mtu, + .ndo_set_mac_address = aqc111_set_mac_addr, + .ndo_validate_addr = eth_validate_addr, + .ndo_vlan_rx_add_vid = aqc111_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = aqc111_vlan_rx_kill_vid, + .ndo_set_rx_mode = aqc111_set_rx_mode, + .ndo_set_features = aqc111_set_features, +}; + +static int aqc111_read_perm_mac(struct usbnet *dev) +{ + u8 buf[ETH_ALEN]; + int ret; + + ret = aqc111_read_cmd(dev, AQ_FLASH_PARAMETERS, 0, 0, ETH_ALEN, buf); + if (ret < 0) + goto out; + + ether_addr_copy(dev->net->perm_addr, buf); + + return 0; +out: + return ret; +} + +static void aqc111_read_fw_version(struct usbnet *dev, + struct aqc111_data *aqc111_data) +{ + aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MAJOR, + 1, 1, &aqc111_data->fw_ver.major); + aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_MINOR, + 1, 1, &aqc111_data->fw_ver.minor); + aqc111_read_cmd(dev, AQ_ACCESS_MAC, AQ_FW_VER_REV, + 1, 1, &aqc111_data->fw_ver.rev); + + if (aqc111_data->fw_ver.major & 0x80) + aqc111_data->fw_ver.major &= ~0x80; +} + +static int aqc111_bind(struct usbnet *dev, struct usb_interface *intf) +{ + struct usb_device *udev = interface_to_usbdev(intf); + enum usb_device_speed usb_speed = udev->speed; + struct aqc111_data *aqc111_data; + int ret; + + /* Check if vendor configuration */ + if (udev->actconfig->desc.bConfigurationValue != 1) { + usb_driver_set_configuration(udev, 1); + return -ENODEV; + } + + usb_reset_configuration(dev->udev); + + ret = usbnet_get_endpoints(dev, intf); + if (ret < 0) { + netdev_dbg(dev->net, "usbnet_get_endpoints failed"); + return ret; + } + + aqc111_data = kzalloc(sizeof(*aqc111_data), GFP_KERNEL); + if (!aqc111_data) + return -ENOMEM; + + /* store aqc111_data pointer in device data field */ + dev->driver_priv = aqc111_data; + + /* Init the MAC address */ + ret = aqc111_read_perm_mac(dev); + if (ret) + goto out; + + ether_addr_copy(dev->net->dev_addr, dev->net->perm_addr); + + /* Set Rx urb size */ + dev->rx_urb_size = URB_SIZE; + + /* Set TX needed headroom & tailroom */ + dev->net->needed_headroom += sizeof(u64); + dev->net->needed_tailroom += sizeof(u64); + + dev->net->max_mtu = 16334; + + dev->net->netdev_ops = &aqc111_netdev_ops; + dev->net->ethtool_ops = &aqc111_ethtool_ops; + + if (usb_device_no_sg_constraint(dev->udev)) + dev->can_dma_sg = 1; + + dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE; + dev->net->features |= AQ_SUPPORT_FEATURE; + dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; + + netif_set_gso_max_size(dev->net, 65535); + + aqc111_read_fw_version(dev, aqc111_data); + aqc111_data->autoneg = AUTONEG_ENABLE; + aqc111_data->advertised_speed = (usb_speed == USB_SPEED_SUPER) ? 
+ SPEED_5000 : SPEED_1000; + + return 0; + +out: + kfree(aqc111_data); + return ret; +} + +static void aqc111_unbind(struct usbnet *dev, struct usb_interface *intf) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + u16 reg16; + + /* Force bz */ + reg16 = SFR_PHYPWR_RSTCTL_BZ; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, + 2, &reg16); + reg16 = 0; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, + 2, &reg16); + + /* Power down ethernet PHY */ + aqc111_data->phy_cfg &= ~AQ_ADV_MASK; + aqc111_data->phy_cfg |= AQ_LOW_POWER; + aqc111_data->phy_cfg &= ~AQ_PHY_POWER_EN; + aqc111_write32_cmd_nopm(dev, AQ_PHY_OPS, 0, 0, + &aqc111_data->phy_cfg); + + kfree(aqc111_data); +} + +static void aqc111_status(struct usbnet *dev, struct urb *urb) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + u64 *event_data = NULL; + int link = 0; + + if (urb->actual_length < sizeof(*event_data)) + return; + + event_data = urb->transfer_buffer; + le64_to_cpus(event_data); + + if (*event_data & AQ_LS_MASK) + link = 1; + else + link = 0; + + aqc111_data->link_speed = (*event_data & AQ_SPEED_MASK) >> + AQ_SPEED_SHIFT; + aqc111_data->link = link; + + if (netif_carrier_ok(dev->net) != link) + usbnet_defer_kevent(dev, EVENT_LINK_RESET); +} + +static void aqc111_configure_rx(struct usbnet *dev, + struct aqc111_data *aqc111_data) +{ + enum usb_device_speed usb_speed = dev->udev->speed; + u16 link_speed = 0, usb_host = 0; + u8 buf[5] = { 0 }; + u8 queue_num = 0; + u16 reg16 = 0; + u8 reg8 = 0; + + buf[0] = 0x00; + buf[1] = 0xF8; + buf[2] = 0x07; + switch (aqc111_data->link_speed) { + case AQ_INT_SPEED_5G: + link_speed = 5000; + reg8 = 0x05; + reg16 = 0x001F; + break; + case AQ_INT_SPEED_2_5G: + link_speed = 2500; + reg16 = 0x003F; + break; + case AQ_INT_SPEED_1G: + link_speed = 1000; + reg16 = 0x009F; + break; + case AQ_INT_SPEED_100M: + link_speed = 100; + queue_num = 1; + reg16 = 0x063F; + buf[1] = 0xFB; + buf[2] = 0x4; + break; + } + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_INTER_PACKET_GAP_0, + 1, 1, &reg8); + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TX_PAUSE_RESEND_T, 3, 3, buf); + + switch (usb_speed) { + case USB_SPEED_SUPER: + usb_host = 3; + break; + case USB_SPEED_HIGH: + usb_host = 2; + break; + case USB_SPEED_FULL: + case USB_SPEED_LOW: + usb_host = 1; + queue_num = 0; + break; + default: + usb_host = 0; + break; + } + + if (dev->net->mtu > 12500 && dev->net->mtu <= 16334) + queue_num = 2; /* For Jumbo packet 16KB */ + + memcpy(buf, &AQC111_BULKIN_SIZE[queue_num], 5); + /* RX bulk configuration */ + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, 5, 5, buf); + + /* Set high low water level */ + if (dev->net->mtu <= 4500) + reg16 = 0x0810; + else if (dev->net->mtu <= 9500) + reg16 = 0x1020; + else if (dev->net->mtu <= 12500) + reg16 = 0x1420; + else if (dev->net->mtu <= 16334) + reg16 = 0x1A20; + + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_PAUSE_WATERLVL_LOW, + 2, &reg16); + netdev_info(dev->net, "Link Speed %d, USB %d", link_speed, usb_host); +}
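An illustrative decode of one AQC111_BULKIN_SIZE entry consumed above (an editorial note; the units are device-specific and not documented in this patch): entry 0, {7, 0x00, 0x01, 0x1E, 0xFF}, sets ctrl 7 = SFR_RX_BULKIN_QCTRL_TIME | SFR_RX_BULKIN_QCTRL_IFG | SFR_RX_BULKIN_QCTRL_SIZE, i.e. all three bulk-in aggregation triggers armed, with timer bytes 0x00/0x01, size threshold 0x1E and inter-frame gap 0xFF.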
+ +static void aqc111_configure_csum_offload(struct usbnet *dev) +{ + u8 reg8 = 0; + + if (dev->net->features & NETIF_F_RXCSUM) { + reg8 |= SFR_RXCOE_IP | SFR_RXCOE_TCP | SFR_RXCOE_UDP | + SFR_RXCOE_TCPV6 | SFR_RXCOE_UDPV6; + } + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_RXCOE_CTL, 1, 1, &reg8); + + reg8 = 0; + if (dev->net->features & NETIF_F_IP_CSUM) + reg8 |= SFR_TXCOE_IP | SFR_TXCOE_TCP | SFR_TXCOE_UDP; + + if (dev->net->features & NETIF_F_IPV6_CSUM) + reg8 |= SFR_TXCOE_TCPV6 | SFR_TXCOE_UDPV6; + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_TXCOE_CTL, 1, 1, &reg8); +} + +static int aqc111_link_reset(struct usbnet *dev) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + u16 reg16 = 0; + u8 reg8 = 0; + + if (aqc111_data->link == 1) { /* Link up */ + aqc111_configure_rx(dev, aqc111_data); + + /* Vlan Tag Filter */ + reg8 = SFR_VLAN_CONTROL_VSO; + if (dev->net->features & NETIF_F_HW_VLAN_CTAG_FILTER) + reg8 |= SFR_VLAN_CONTROL_VFE; + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_VLAN_ID_CONTROL, + 1, 1, &reg8); + + reg8 = 0x0; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, + 1, 1, &reg8); + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMTX_DMA_CONTROL, + 1, 1, &reg8); + + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ARC_CTRL, 1, 1, &reg8); + + reg16 = SFR_RX_CTL_IPE | SFR_RX_CTL_AB; + aqc111_data->rxctl = reg16; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); + + reg8 = SFR_RX_PATH_READY; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, + 1, 1, &reg8); + + reg8 = SFR_BULK_OUT_EFF_EN; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, + 1, 1, &reg8); + + reg16 = 0; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + + reg16 = SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + + aqc111_configure_csum_offload(dev); + + aqc111_set_rx_mode(dev->net); + + aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + + if (dev->net->mtu > 1500) + reg16 |= SFR_MEDIUM_JUMBO_EN; + + reg16 |= SFR_MEDIUM_RECEIVE_EN | SFR_MEDIUM_RXFLOW_CTRLEN | + SFR_MEDIUM_TXFLOW_CTRLEN; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + + aqc111_data->rxctl |= SFR_RX_CTL_START; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, + 2, &aqc111_data->rxctl); + + netif_carrier_on(dev->net); + } else { + aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + reg16 &= ~SFR_MEDIUM_RECEIVE_EN; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + + aqc111_data->rxctl &= ~SFR_RX_CTL_START; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, + 2, &aqc111_data->rxctl); + + reg8 = SFR_BULK_OUT_FLUSH_EN | SFR_BULK_OUT_EFF_EN; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, + 1, 1, &reg8); + reg8 = SFR_BULK_OUT_EFF_EN; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, + 1, 1, &reg8); + + netif_carrier_off(dev->net); + } + return 0; +}
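Ordering note on the link-up path above (an editorial observation, using the bit values from aqc111.h): the medium register is first programmed with SFR_MEDIUM_XGMIIMODE | SFR_MEDIUM_FULL_DUPLEX (0x0001 | 0x0002 = 0x0003) and only re-read and ORed with SFR_MEDIUM_RECEIVE_EN (0x0100) and the flow-control enables (0x0010 | 0x0020) after checksum offload and the RX filter are configured; SFR_RX_CTL_START is set last of all, so no traffic flows through a half-configured path.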
+static int aqc111_reset(struct usbnet *dev) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + u8 reg8 = 0; + + dev->rx_urb_size = URB_SIZE; + + if (usb_device_no_sg_constraint(dev->udev)) + dev->can_dma_sg = 1; + + dev->net->hw_features |= AQ_SUPPORT_HW_FEATURE; + dev->net->features |= AQ_SUPPORT_FEATURE; + dev->net->vlan_features |= AQ_SUPPORT_VLAN_FEATURE; + + /* Power up ethernet PHY */ + aqc111_data->phy_cfg = AQ_PHY_POWER_EN; + aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, + &aqc111_data->phy_cfg); + + /* Set the MAC address */ + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_NODE_ID, ETH_ALEN, + ETH_ALEN, dev->net->dev_addr); + + reg8 = 0xFF; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, 1, 1, &reg8); + + reg8 = 0x0; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_SWP_CTRL, 1, 1, &reg8); + + aqc111_read_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8); + reg8 &= ~(SFR_MONITOR_MODE_EPHYRW | SFR_MONITOR_MODE_RWLC | + SFR_MONITOR_MODE_RWMP | SFR_MONITOR_MODE_RWWF | + SFR_MONITOR_MODE_RW_FLAG); + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_MONITOR_MODE, 1, 1, &reg8); + + netif_carrier_off(dev->net); + + /* Phy advertise */ + aqc111_set_phy_speed(dev, aqc111_data->autoneg, + aqc111_data->advertised_speed); + + return 0; +} + +static int aqc111_stop(struct usbnet *dev) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + u16 reg16 = 0; + + aqc111_read16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + reg16 &= ~SFR_MEDIUM_RECEIVE_EN; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + reg16 = 0; + aqc111_write16_cmd(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); + + /* Put PHY to low power*/ + aqc111_data->phy_cfg |= AQ_LOW_POWER; + aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, + &aqc111_data->phy_cfg); + + netif_carrier_off(dev->net); + + return 0; +} + +static void aqc111_rx_checksum(struct sk_buff *skb, u64 pkt_desc) +{ + u32 pkt_type = 0; + + skb->ip_summed = CHECKSUM_NONE; + /* checksum error bit is set */ + if (pkt_desc & AQ_RX_PD_L4_ERR || pkt_desc & AQ_RX_PD_L3_ERR) + return; + + pkt_type = pkt_desc & AQ_RX_PD_L4_TYPE_MASK; + /* It must be a TCP or UDP packet with a valid checksum */ + if (pkt_type == AQ_RX_PD_L4_TCP || pkt_type == AQ_RX_PD_L4_UDP) + skb->ip_summed = CHECKSUM_UNNECESSARY; +}
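A sketch of the bulk-in URB layout that aqc111_rx_fixup() below assumes (an editorial reconstruction from the mask/shift definitions in aqc111.h):

    [pkt 0, padded to 8][pkt 1]...[desc 0][desc 1]...[desc n-1][desc_hdr]

The trailing 64-bit little-endian desc_hdr carries the packet count in bits 12:0 and the byte offset of the descriptor array in bits 31:13; each per-packet descriptor carries the frame length in bits 30:16 plus the AQ_RX_PD_* status bits.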
+ +static int aqc111_rx_fixup(struct usbnet *dev, struct sk_buff *skb) +{ + struct aqc111_data *aqc111_data = dev->driver_priv; + struct sk_buff *new_skb = NULL; + u32 pkt_total_offset = 0; + u64 *pkt_desc_ptr = NULL; + u32 start_of_descs = 0; + u32 desc_offset = 0; /*RX Header Offset*/ + u16 pkt_count = 0; + u64 desc_hdr = 0; + u16 vlan_tag = 0; + u32 skb_len = 0; + + if (!skb) + goto err; + + if (skb->len == 0) + goto err; + + skb_len = skb->len; + /* RX Descriptor Header */ + skb_trim(skb, skb->len - sizeof(desc_hdr)); + desc_hdr = le64_to_cpup((u64 *)skb_tail_pointer(skb)); + + /* Check these packets */ + desc_offset = (desc_hdr & AQ_RX_DH_DESC_OFFSET_MASK) >> + AQ_RX_DH_DESC_OFFSET_SHIFT; + pkt_count = desc_hdr & AQ_RX_DH_PKT_CNT_MASK; + start_of_descs = skb_len - ((pkt_count + 1) * sizeof(desc_hdr)); + + /* self check descs position */ + if (start_of_descs != desc_offset) + goto err; + + /* self check desc_offset from header*/ + if (desc_offset >= skb_len) + goto err; + + if (pkt_count == 0) + goto err; + + /* Get the first RX packet descriptor */ + pkt_desc_ptr = (u64 *)(skb->data + desc_offset); + + while (pkt_count--) { + u64 pkt_desc = le64_to_cpup(pkt_desc_ptr); + u32 pkt_len_with_padd = 0; + u32 pkt_len = 0; + + pkt_len = (u32)((pkt_desc & AQ_RX_PD_LEN_MASK) >> + AQ_RX_PD_LEN_SHIFT); + pkt_len_with_padd = ((pkt_len + 7) & 0x7FFF8); + + pkt_total_offset += pkt_len_with_padd; + if (pkt_total_offset > desc_offset || + (pkt_count == 0 && pkt_total_offset != desc_offset)) { + goto err; + } + + if (pkt_desc & AQ_RX_PD_DROP || + !(pkt_desc & AQ_RX_PD_RX_OK) || + pkt_len > (dev->hard_mtu + AQ_RX_HW_PAD)) { + skb_pull(skb, pkt_len_with_padd); + /* Next RX Packet Descriptor */ + pkt_desc_ptr++; + continue; + } + + /* Clone SKB */ + new_skb = skb_clone(skb, GFP_ATOMIC); + + if (!new_skb) + goto err; + + new_skb->len = pkt_len; + skb_pull(new_skb, AQ_RX_HW_PAD); + skb_set_tail_pointer(new_skb, new_skb->len); + + new_skb->truesize = SKB_TRUESIZE(new_skb->len); + if (aqc111_data->rx_checksum) + aqc111_rx_checksum(new_skb, pkt_desc); + + if (pkt_desc & AQ_RX_PD_VLAN) { + vlan_tag = pkt_desc >> AQ_RX_PD_VLAN_SHIFT; + __vlan_hwaccel_put_tag(new_skb, htons(ETH_P_8021Q), + vlan_tag & VLAN_VID_MASK); + } + + usbnet_skb_return(dev, new_skb); + if (pkt_count == 0) + break; + + skb_pull(skb, pkt_len_with_padd); + + /* Next RX Packet Header */ + pkt_desc_ptr++; + + new_skb = NULL; + } + + return 1; + +err: + return 0; +} + +static struct sk_buff *aqc111_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + gfp_t flags) +{ + int frame_size = dev->maxpacket; + struct sk_buff *new_skb = NULL; + u64 *tx_desc_ptr = NULL; + int padding_size = 0; + int headroom = 0; + int tailroom = 0; + u64 tx_desc = 0; + u16 tci = 0; + + /*Length of actual data*/ + tx_desc |= skb->len & AQ_TX_DESC_LEN_MASK; + + /* TSO MSS */ + tx_desc |= ((u64)(skb_shinfo(skb)->gso_size & AQ_TX_DESC_MSS_MASK)) << + AQ_TX_DESC_MSS_SHIFT; + + headroom = (skb->len + sizeof(tx_desc)) % 8; + if (headroom != 0) + padding_size = 8 - headroom; + + if (((skb->len + sizeof(tx_desc) + padding_size) % frame_size) == 0) { + padding_size += 8; + tx_desc |= AQ_TX_DESC_DROP_PADD; + } + + /* Vlan Tag */ + if (vlan_get_tag(skb, &tci) >= 0) { + tx_desc |= AQ_TX_DESC_VLAN; + tx_desc |= ((u64)tci & AQ_TX_DESC_VLAN_MASK) << + AQ_TX_DESC_VLAN_SHIFT; + } + + if (!dev->can_dma_sg && (dev->net->features & NETIF_F_SG) && + skb_linearize(skb)) + return NULL; + + headroom = skb_headroom(skb); + tailroom = skb_tailroom(skb); + + if (!(headroom >= sizeof(tx_desc) && tailroom >= padding_size)) { + new_skb = skb_copy_expand(skb, sizeof(tx_desc), + padding_size, flags); + dev_kfree_skb_any(skb); + skb = new_skb; + if (!skb) + return NULL; + } + if (padding_size != 0) + skb_put_zero(skb, padding_size); + /* Copy TX header */ + tx_desc_ptr = skb_push(skb, sizeof(tx_desc)); + *tx_desc_ptr = cpu_to_le64(tx_desc); + + usbnet_set_skb_tx_stats(skb, 1, 0); + + return skb; +}
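A companion sketch for aqc111_tx_fixup() above, per the aqc111.h masks (editorial, not part of the patch): the 64-bit little-endian header prepended to each frame packs the payload length in bits 20:0, AQ_TX_DESC_DROP_PADD in bit 28, AQ_TX_DESC_VLAN in bit 29, the TSO MSS in bits 46:32 and the VLAN TCI in bits 63:48. Frames are zero-padded to a multiple of 8 bytes, with 8 extra bytes (and DROP_PADD set) whenever the padded length would otherwise land exactly on a USB packet boundary.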
+ +static const struct driver_info aqc111_info = { + .description = "Aquantia AQtion USB to 5GbE Controller", + .bind = aqc111_bind, + .unbind = aqc111_unbind, + .status = aqc111_status, + .link_reset = aqc111_link_reset, + .reset = aqc111_reset, + .stop = aqc111_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, + .rx_fixup = aqc111_rx_fixup, + .tx_fixup = aqc111_tx_fixup, +}; + +#define ASIX111_DESC \ +"ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter" + +static const struct driver_info asix111_info = { + .description = ASIX111_DESC, + .bind = aqc111_bind, + .unbind = aqc111_unbind, + .status = aqc111_status, + .link_reset = aqc111_link_reset, + .reset = aqc111_reset, + .stop = aqc111_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, + .rx_fixup = aqc111_rx_fixup, + .tx_fixup = aqc111_tx_fixup, +}; + +#undef ASIX111_DESC + +#define ASIX112_DESC \ +"ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter" + +static const struct driver_info asix112_info = { + .description = ASIX112_DESC, + .bind = aqc111_bind, + .unbind = aqc111_unbind, + .status = aqc111_status, + .link_reset = aqc111_link_reset, + .reset = aqc111_reset, + .stop = aqc111_stop, + .flags = FLAG_ETHER | FLAG_FRAMING_AX | + FLAG_AVOID_UNLINK_URBS | FLAG_MULTI_PACKET, + .rx_fixup = aqc111_rx_fixup, + .tx_fixup = aqc111_tx_fixup, +}; + +#undef ASIX112_DESC + +static int aqc111_suspend(struct usb_interface *intf, pm_message_t message) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct aqc111_data *aqc111_data = dev->driver_priv; + u16 temp_rx_ctrl = 0x00; + u16 reg16; + u8 reg8; + + usbnet_suspend(intf, message); + + aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); + temp_rx_ctrl = reg16; + /* Stop RX operations*/ + reg16 &= ~SFR_RX_CTL_START; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); + /* Force bz */ + aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, + 2, &reg16); + reg16 |= SFR_PHYPWR_RSTCTL_BZ; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_PHYPWR_RSTCTL, + 2, &reg16); + + reg8 = SFR_BULK_OUT_EFF_EN; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BULK_OUT_CTRL, + 1, 1, &reg8); + + temp_rx_ctrl &= ~(SFR_RX_CTL_START | SFR_RX_CTL_RF_WAK | + SFR_RX_CTL_AP | SFR_RX_CTL_AM); + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, + 2, &temp_rx_ctrl); + + reg8 = 0x00; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, + 1, 1, &reg8); + + if (aqc111_data->wol_flags) { + struct aqc111_wol_cfg wol_cfg; + + memset(&wol_cfg, 0, sizeof(struct aqc111_wol_cfg)); + + aqc111_data->phy_cfg |= AQ_WOL; + ether_addr_copy(wol_cfg.hw_addr, dev->net->dev_addr); + wol_cfg.flags = aqc111_data->wol_flags; + + temp_rx_ctrl |= (SFR_RX_CTL_AB | SFR_RX_CTL_START); + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, + 2, &temp_rx_ctrl); + reg8 = 0x00; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, + 1, 1, &reg8); + reg8 = SFR_BMRX_DMA_EN; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, + 1, 1, &reg8); + reg8 = SFR_RX_PATH_READY; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, + 1, 1, &reg8); + reg8 = 0x07; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QCTRL, + 1, 1, &reg8); + reg8 = 0x00; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, + SFR_RX_BULKIN_QTIMR_LOW, 1, 1, &reg8); + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, + SFR_RX_BULKIN_QTIMR_HIGH, 1, 1, &reg8); + reg8 = 0xFF; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QSIZE, + 1, 1, &reg8); + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_BULKIN_QIFG, + 1, 1, &reg8); + + aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, + SFR_MEDIUM_STATUS_MODE, 2, &reg16); + reg16 |= SFR_MEDIUM_RECEIVE_EN; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, + SFR_MEDIUM_STATUS_MODE, 2, &reg16); + + aqc111_write_cmd(dev, AQ_WOL_CFG, 0, 0, + WOL_CFG_SIZE, &wol_cfg); + aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, + &aqc111_data->phy_cfg); + } else { + aqc111_data->phy_cfg |= AQ_LOW_POWER; + aqc111_write32_cmd(dev, AQ_PHY_OPS, 0, 0, + &aqc111_data->phy_cfg); + + /* Disable RX path */ + aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, + SFR_MEDIUM_STATUS_MODE, 2, &reg16); + reg16 &= ~SFR_MEDIUM_RECEIVE_EN; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, + SFR_MEDIUM_STATUS_MODE, 2, &reg16); + } + + return 0; +} + +static int aqc111_resume(struct usb_interface *intf) +{ + struct usbnet *dev = usb_get_intfdata(intf); + struct aqc111_data *aqc111_data = dev->driver_priv; + u16 reg16; + u8 reg8; + + netif_carrier_off(dev->net); + + /* Power up ethernet PHY */ + aqc111_data->phy_cfg |= AQ_PHY_POWER_EN; + aqc111_data->phy_cfg &= ~AQ_LOW_POWER; + aqc111_data->phy_cfg &= ~AQ_WOL; + + reg8 = 0xFF; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_BM_INT_MASK, + 1, 1, &reg8); + /* Configure RX control register => start operation */ + reg16 = aqc111_data->rxctl; + reg16 &= ~SFR_RX_CTL_START; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); + + reg16 |= SFR_RX_CTL_START; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_RX_CTL, 2, &reg16); + + aqc111_set_phy_speed(dev, aqc111_data->autoneg, + aqc111_data->advertised_speed); + + aqc111_read16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + reg16 |= SFR_MEDIUM_RECEIVE_EN; + aqc111_write16_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_MEDIUM_STATUS_MODE, + 2, &reg16); + reg8 = SFR_RX_PATH_READY; + aqc111_write_cmd_nopm(dev, AQ_ACCESS_MAC, SFR_ETH_MAC_PATH, + 1, 1, &reg8); + reg8 = 0x0; + aqc111_write_cmd(dev, AQ_ACCESS_MAC, SFR_BMRX_DMA_CONTROL, 1, 1, &reg8); + + return 
usbnet_resume(intf); +} + +#define AQC111_USB_ETH_DEV(vid, pid, table) \ + USB_DEVICE_INTERFACE_CLASS((vid), (pid), USB_CLASS_VENDOR_SPEC), \ + .driver_info = (unsigned long)&(table) \ +}, \ +{ \ + USB_DEVICE_AND_INTERFACE_INFO((vid), (pid), \ + USB_CLASS_COMM, \ + USB_CDC_SUBCLASS_ETHERNET, \ + USB_CDC_PROTO_NONE), \ + .driver_info = (unsigned long)&(table), + +static const struct usb_device_id products[] = { + {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)}, + {AQC111_USB_ETH_DEV(0x0b95, 0x2790, asix111_info)}, + {AQC111_USB_ETH_DEV(0x0b95, 0x2791, asix112_info)}, + { },/* END */ +}; +MODULE_DEVICE_TABLE(usb, products); + +static struct usb_driver aq_driver = { + .name = "aqc111", + .id_table = products, + .probe = usbnet_probe, + .suspend = aqc111_suspend, + .resume = aqc111_resume, + .disconnect = usbnet_disconnect, +}; + +module_usb_driver(aq_driver); + +MODULE_DESCRIPTION("Aquantia AQtion USB to 5/2.5GbE Controllers"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/usb/aqc111.h b/drivers/net/usb/aqc111.h new file mode 100644 index 000000000000..4d68b3a6067c --- /dev/null +++ b/drivers/net/usb/aqc111.h @@ -0,0 +1,232 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later + * Aquantia Corp. Aquantia AQtion USB to 5GbE Controller + * Copyright (C) 2003-2005 David Hollis <dhollis@davehollis.com> + * Copyright (C) 2005 Phil Chang <pchang23@sbcglobal.net> + * Copyright (C) 2002-2003 TiVo Inc. + * Copyright (C) 2017-2018 ASIX + * Copyright (C) 2018 Aquantia Corp. + */ + +#ifndef __LINUX_USBNET_AQC111_H +#define __LINUX_USBNET_AQC111_H + +#define URB_SIZE (1024 * 62) + +#define AQ_MCAST_FILTER_SIZE 8 +#define AQ_MAX_MCAST 64 + +#define AQ_ACCESS_MAC 0x01 +#define AQ_FLASH_PARAMETERS 0x20 +#define AQ_PHY_POWER 0x31 +#define AQ_WOL_CFG 0x60 +#define AQ_PHY_OPS 0x61 + +#define AQ_USB_PHY_SET_TIMEOUT 10000 +#define AQ_USB_SET_TIMEOUT 4000 + +/* Feature. ********************************************/ +#define AQ_SUPPORT_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\ + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\ + NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX |\ + NETIF_F_HW_VLAN_CTAG_RX) + +#define AQ_SUPPORT_HW_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\ + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\ + NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_FILTER) + +#define AQ_SUPPORT_VLAN_FEATURE (NETIF_F_SG | NETIF_F_IP_CSUM |\ + NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |\ + NETIF_F_TSO) + +/* SFR Reg. 
********************************************/ + +#define SFR_GENERAL_STATUS 0x03 +#define SFR_CHIP_STATUS 0x05 +#define SFR_RX_CTL 0x0B + #define SFR_RX_CTL_TXPADCRC 0x0400 + #define SFR_RX_CTL_IPE 0x0200 + #define SFR_RX_CTL_DROPCRCERR 0x0100 + #define SFR_RX_CTL_START 0x0080 + #define SFR_RX_CTL_RF_WAK 0x0040 + #define SFR_RX_CTL_AP 0x0020 + #define SFR_RX_CTL_AM 0x0010 + #define SFR_RX_CTL_AB 0x0008 + #define SFR_RX_CTL_AMALL 0x0002 + #define SFR_RX_CTL_PRO 0x0001 + #define SFR_RX_CTL_STOP 0x0000 +#define SFR_INTER_PACKET_GAP_0 0x0D +#define SFR_NODE_ID 0x10 +#define SFR_MULTI_FILTER_ARRY 0x16 +#define SFR_MEDIUM_STATUS_MODE 0x22 + #define SFR_MEDIUM_XGMIIMODE 0x0001 + #define SFR_MEDIUM_FULL_DUPLEX 0x0002 + #define SFR_MEDIUM_RXFLOW_CTRLEN 0x0010 + #define SFR_MEDIUM_TXFLOW_CTRLEN 0x0020 + #define SFR_MEDIUM_JUMBO_EN 0x0040 + #define SFR_MEDIUM_RECEIVE_EN 0x0100 +#define SFR_MONITOR_MODE 0x24 + #define SFR_MONITOR_MODE_EPHYRW 0x01 + #define SFR_MONITOR_MODE_RWLC 0x02 + #define SFR_MONITOR_MODE_RWMP 0x04 + #define SFR_MONITOR_MODE_RWWF 0x08 + #define SFR_MONITOR_MODE_RW_FLAG 0x10 + #define SFR_MONITOR_MODE_PMEPOL 0x20 + #define SFR_MONITOR_MODE_PMETYPE 0x40 +#define SFR_PHYPWR_RSTCTL 0x26 + #define SFR_PHYPWR_RSTCTL_BZ 0x0010 + #define SFR_PHYPWR_RSTCTL_IPRL 0x0020 +#define SFR_VLAN_ID_ADDRESS 0x2A +#define SFR_VLAN_ID_CONTROL 0x2B + #define SFR_VLAN_CONTROL_WE 0x0001 + #define SFR_VLAN_CONTROL_RD 0x0002 + #define SFR_VLAN_CONTROL_VSO 0x0010 + #define SFR_VLAN_CONTROL_VFE 0x0020 +#define SFR_VLAN_ID_DATA0 0x2C +#define SFR_VLAN_ID_DATA1 0x2D +#define SFR_RX_BULKIN_QCTRL 0x2E + #define SFR_RX_BULKIN_QCTRL_TIME 0x01 + #define SFR_RX_BULKIN_QCTRL_IFG 0x02 + #define SFR_RX_BULKIN_QCTRL_SIZE 0x04 +#define SFR_RX_BULKIN_QTIMR_LOW 0x2F +#define SFR_RX_BULKIN_QTIMR_HIGH 0x30 +#define SFR_RX_BULKIN_QSIZE 0x31 +#define SFR_RX_BULKIN_QIFG 0x32 +#define SFR_RXCOE_CTL 0x34 + #define SFR_RXCOE_IP 0x01 + #define SFR_RXCOE_TCP 0x02 + #define SFR_RXCOE_UDP 0x04 + #define SFR_RXCOE_ICMP 0x08 + #define SFR_RXCOE_IGMP 0x10 + #define SFR_RXCOE_TCPV6 0x20 + #define SFR_RXCOE_UDPV6 0x40 + #define SFR_RXCOE_ICMV6 0x80 +#define SFR_TXCOE_CTL 0x35 + #define SFR_TXCOE_IP 0x01 + #define SFR_TXCOE_TCP 0x02 + #define SFR_TXCOE_UDP 0x04 + #define SFR_TXCOE_ICMP 0x08 + #define SFR_TXCOE_IGMP 0x10 + #define SFR_TXCOE_TCPV6 0x20 + #define SFR_TXCOE_UDPV6 0x40 + #define SFR_TXCOE_ICMV6 0x80 +#define SFR_BM_INT_MASK 0x41 +#define SFR_BMRX_DMA_CONTROL 0x43 + #define SFR_BMRX_DMA_EN 0x80 +#define SFR_BMTX_DMA_CONTROL 0x46 +#define SFR_PAUSE_WATERLVL_LOW 0x54 +#define SFR_PAUSE_WATERLVL_HIGH 0x55 +#define SFR_ARC_CTRL 0x9E +#define SFR_SWP_CTRL 0xB1 +#define SFR_TX_PAUSE_RESEND_T 0xB2 +#define SFR_ETH_MAC_PATH 0xB7 + #define SFR_RX_PATH_READY 0x01 +#define SFR_BULK_OUT_CTRL 0xB9 + #define SFR_BULK_OUT_FLUSH_EN 0x01 + #define SFR_BULK_OUT_EFF_EN 0x02 + +#define AQ_FW_VER_MAJOR 0xDA +#define AQ_FW_VER_MINOR 0xDB +#define AQ_FW_VER_REV 0xDC + +/*PHY_OPS**********************************************************************/ + +#define AQ_ADV_100M BIT(0) +#define AQ_ADV_1G BIT(1) +#define AQ_ADV_2G5 BIT(2) +#define AQ_ADV_5G BIT(3) +#define AQ_ADV_MASK 0x0F + +#define AQ_PAUSE BIT(16) +#define AQ_ASYM_PAUSE BIT(17) +#define AQ_LOW_POWER BIT(18) +#define AQ_PHY_POWER_EN BIT(19) +#define AQ_WOL BIT(20) +#define AQ_DOWNSHIFT BIT(21) + +#define AQ_DSH_RETRIES_SHIFT 0x18 +#define AQ_DSH_RETRIES_MASK 0xF000000 + +#define AQ_WOL_FLAG_MP 0x2 + +/******************************************************************************/ + +struct 
aqc111_wol_cfg { + u8 hw_addr[6]; + u8 flags; + u8 rsvd[283]; +} __packed; + +#define WOL_CFG_SIZE sizeof(struct aqc111_wol_cfg) + +struct aqc111_data { + u16 rxctl; + u8 rx_checksum; + u8 link_speed; + u8 link; + u8 autoneg; + u32 advertised_speed; + struct { + u8 major; + u8 minor; + u8 rev; + } fw_ver; + u32 phy_cfg; + u8 wol_flags; +}; + +#define AQ_LS_MASK 0x8000 +#define AQ_SPEED_MASK 0x7F00 +#define AQ_SPEED_SHIFT 0x0008 +#define AQ_INT_SPEED_5G 0x000F +#define AQ_INT_SPEED_2_5G 0x0010 +#define AQ_INT_SPEED_1G 0x0011 +#define AQ_INT_SPEED_100M 0x0013 + +/* TX Descriptor */ +#define AQ_TX_DESC_LEN_MASK 0x1FFFFF +#define AQ_TX_DESC_DROP_PADD BIT(28) +#define AQ_TX_DESC_VLAN BIT(29) +#define AQ_TX_DESC_MSS_MASK 0x7FFF +#define AQ_TX_DESC_MSS_SHIFT 0x20 +#define AQ_TX_DESC_VLAN_MASK 0xFFFF +#define AQ_TX_DESC_VLAN_SHIFT 0x30 + +#define AQ_RX_HW_PAD 0x02 + +/* RX Packet Descriptor */ +#define AQ_RX_PD_L4_ERR BIT(0) +#define AQ_RX_PD_L3_ERR BIT(1) +#define AQ_RX_PD_L4_TYPE_MASK 0x1C +#define AQ_RX_PD_L4_UDP 0x04 +#define AQ_RX_PD_L4_TCP 0x10 +#define AQ_RX_PD_L3_TYPE_MASK 0x60 +#define AQ_RX_PD_L3_IP 0x20 +#define AQ_RX_PD_L3_IP6 0x40 + +#define AQ_RX_PD_VLAN BIT(10) +#define AQ_RX_PD_RX_OK BIT(11) +#define AQ_RX_PD_DROP BIT(31) +#define AQ_RX_PD_LEN_MASK 0x7FFF0000 +#define AQ_RX_PD_LEN_SHIFT 0x10 +#define AQ_RX_PD_VLAN_SHIFT 0x20 + +/* RX Descriptor header */ +#define AQ_RX_DH_PKT_CNT_MASK 0x1FFF +#define AQ_RX_DH_DESC_OFFSET_MASK 0xFFFFE000 +#define AQ_RX_DH_DESC_OFFSET_SHIFT 0x0D + +static struct { + unsigned char ctrl; + unsigned char timer_l; + unsigned char timer_h; + unsigned char size; + unsigned char ifg; +} AQC111_BULKIN_SIZE[] = { + /* xHCI & EHCI & OHCI */ + {7, 0x00, 0x01, 0x1E, 0xFF},/* 10G, 5G, 2.5G, 1G */ + {7, 0xA0, 0x00, 0x14, 0x00},/* 100M */ + /* Jumbo packet */ + {7, 0x00, 0x01, 0x18, 0xFF}, +}; + +#endif /* __LINUX_USBNET_AQC111_H */ diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 5c42cf81a08b..b3b3c05903a1 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -562,6 +562,8 @@ static const struct driver_info wwan_info = { #define MICROSOFT_VENDOR_ID 0x045e #define UBLOX_VENDOR_ID 0x1546 #define TPLINK_VENDOR_ID 0x2357 +#define AQUANTIA_VENDOR_ID 0x2eca +#define ASIX_VENDOR_ID 0x0b95 static const struct usb_device_id products[] = { /* BLACKLIST !! @@ -821,6 +823,30 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Aquantia AQtion USB to 5GbE Controller (based on AQC111U) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(AQUANTIA_VENDOR_ID, 0xc101, + USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* ASIX USB 3.1 Gen1 to 5G Multi-Gigabit Ethernet Adapter(based on AQC111U) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2790, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + +/* ASIX USB 3.1 Gen1 to 2.5G Multi-Gigabit Ethernet Adapter(based on AQC112U) */ +{ + USB_DEVICE_AND_INTERFACE_INFO(ASIX_VENDOR_ID, 0x2791, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, + USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. 
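For reference, each AQC111_USB_ETH_DEV() entry in the aqc111 id table above deliberately expands to two usb_device_id matches — one for the adapter's vendor-specific interface and one for its CDC Ethernet fallback configuration — while the cdc_ether.c hunk above blacklists the same VID:PID pairs (driver_info = 0) so the generic CDC Ethernet driver leaves those interfaces to aqc111. A sketch of the expansion for the first device, illustrative only and not part of the patch:

/* What {AQC111_USB_ETH_DEV(0x2eca, 0xc101, aqc111_info)} expands to
 * after preprocessing: two match entries sharing one driver_info.
 */
static const struct usb_device_id products_expanded[] = {
	{
		/* vendor-specific interface */
		USB_DEVICE_INTERFACE_CLASS(0x2eca, 0xc101,
					   USB_CLASS_VENDOR_SPEC),
		.driver_info = (unsigned long)&aqc111_info
	},
	{
		/* CDC Ethernet interface on the same device */
		USB_DEVICE_AND_INTERFACE_INFO(0x2eca, 0xc101,
					      USB_CLASS_COMM,
					      USB_CDC_SUBCLASS_ETHERNET,
					      USB_CDC_PROTO_NONE),
		.driver_info = (unsigned long)&aqc111_info,
	},
	{ },/* END */
};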
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c index 77d3c85febf1..e96bc0c6140f 100644 --- a/drivers/net/usb/lan78xx.c +++ b/drivers/net/usb/lan78xx.c @@ -13,6 +13,7 @@ #include <linux/slab.h> #include <linux/if_vlan.h> #include <linux/uaccess.h> +#include <linux/linkmode.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/ipv6.h> @@ -1586,18 +1587,17 @@ static int lan78xx_set_pause(struct net_device *net, dev->fc_request_control |= FLOW_CTRL_TX; if (ecmd.base.autoneg) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; u32 mii_adv; - u32 advertising; - ethtool_convert_link_mode_to_legacy_u32( - &advertising, ecmd.link_modes.advertising); - - advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + ecmd.link_modes.advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + ecmd.link_modes.advertising); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - advertising |= mii_adv_to_ethtool_adv_t(mii_adv); - - ethtool_convert_legacy_u32_to_link_mode( - ecmd.link_modes.advertising, advertising); + mii_adv_to_linkmode_adv_t(fc, mii_adv); + linkmode_or(ecmd.link_modes.advertising, fc, + ecmd.link_modes.advertising); phy_ethtool_ksettings_set(phydev, &ecmd); } @@ -2095,6 +2095,7 @@ static struct phy_device *lan7801_phy_init(struct lan78xx_net *dev) static int lan78xx_phy_init(struct lan78xx_net *dev) { + __ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, }; int ret; u32 mii_adv; struct phy_device *phydev; @@ -2158,9 +2159,13 @@ static int lan78xx_phy_init(struct lan78xx_net *dev) /* support both flow controls */ dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX); - phydev->advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT, + phydev->advertising); + linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + phydev->advertising); mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control); - phydev->advertising |= mii_adv_to_ethtool_adv_t(mii_adv); + mii_adv_to_linkmode_adv_t(fc, mii_adv); + linkmode_or(phydev->advertising, fc, phydev->advertising); if (phydev->mdio.dev.of_node) { u32 reg; diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index f2d01cb6f958..e3d08626828e 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -618,9 +618,7 @@ static void smsc95xx_status(struct usbnet *dev, struct urb *urb) return; } - memcpy(&intdata, urb->transfer_buffer, 4); - le32_to_cpus(&intdata); - + intdata = get_unaligned_le32(urb->transfer_buffer); netif_dbg(dev, link, dev->net, "intdata: 0x%08X\n", intdata); if (intdata & INT_ENP_PHY_INT_) @@ -1295,6 +1293,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) dev->net->features |= NETIF_F_RXCSUM; dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM; + set_bit(EVENT_NO_IP_ALIGN, &dev->flags); smsc95xx_init_mac_address(dev); @@ -1933,8 +1932,7 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) unsigned char *packet; u16 size; - memcpy(&header, skb->data, sizeof(header)); - le32_to_cpus(&header); + header = get_unaligned_le32(skb->data); skb_pull(skb, 4 + NET_IP_ALIGN); packet = skb->data; @@ -2011,12 +2009,30 @@ static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb) return (high_16 << 16) | low_16; } +/* The TX CSUM won't work if the checksum lies in the last 4 bytes of the + * transmission. This is fairly unlikely; it only seems to trigger for some + * short TCP ACK packets.
+ * + * Note, this calculation should probably check for the alignment of the + * data as well, but a straight check for csum being in the last four bytes + * of the packet should be ok for now. + */ +static bool smsc95xx_can_tx_checksum(struct sk_buff *skb) +{ + unsigned int len = skb->len - skb_checksum_start_offset(skb); + + if (skb->len <= 45) + return false; + return skb->csum_offset < (len - (4 + 1)); +} + static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) { bool csum = skb->ip_summed == CHECKSUM_PARTIAL; int overhead = csum ? SMSC95XX_TX_OVERHEAD_CSUM : SMSC95XX_TX_OVERHEAD; u32 tx_cmd_a, tx_cmd_b; + void *ptr; /* We do not advertise SG, so skbs should be already linearized */ BUG_ON(skb_shinfo(skb)->nr_frags); @@ -2030,8 +2046,11 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, return NULL; } + tx_cmd_b = (u32)skb->len; + tx_cmd_a = tx_cmd_b | TX_CMD_A_FIRST_SEG_ | TX_CMD_A_LAST_SEG_; + if (csum) { - if (skb->len <= 45) { + if (!smsc95xx_can_tx_checksum(skb)) { /* workaround - hardware tx checksum does not work * properly with extremely small packets */ long csstart = skb_checksum_start_offset(skb); @@ -2043,24 +2062,18 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev, csum = false; } else { u32 csum_preamble = smsc95xx_calc_csum_preamble(skb); - skb_push(skb, 4); - cpu_to_le32s(&csum_preamble); - memcpy(skb->data, &csum_preamble, 4); + ptr = skb_push(skb, 4); + put_unaligned_le32(csum_preamble, ptr); + + tx_cmd_a += 4; + tx_cmd_b += 4; + tx_cmd_b |= TX_CMD_B_CSUM_ENABLE; } } - skb_push(skb, 4); - tx_cmd_b = (u32)(skb->len - 4); - if (csum) - tx_cmd_b |= TX_CMD_B_CSUM_ENABLE; - cpu_to_le32s(&tx_cmd_b); - memcpy(skb->data, &tx_cmd_b, 4); - - skb_push(skb, 4); - tx_cmd_a = (u32)(skb->len - 8) | TX_CMD_A_FIRST_SEG_ | - TX_CMD_A_LAST_SEG_; - cpu_to_le32s(&tx_cmd_a); - memcpy(skb->data, &tx_cmd_a, 4); + ptr = skb_push(skb, 8); + put_unaligned_le32(tx_cmd_a, ptr); + put_unaligned_le32(tx_cmd_b, ptr+4); return skb; } diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 890fa5b905e2..f412ea1cef18 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -1253,7 +1253,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, return PTR_ERR(net); peer = rtnl_create_link(net, ifname, name_assign_type, - &veth_link_ops, tbp); + &veth_link_ops, tbp, extack); if (IS_ERR(peer)) { put_net(net); return PTR_ERR(peer); diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index ea672145f6a6..023725086046 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -236,6 +236,7 @@ struct virtnet_info { u32 speed; unsigned long guest_offloads; + unsigned long guest_offloads_capable; /* failover when STANDBY feature enabled */ struct failover *failover; @@ -2479,6 +2480,31 @@ static int virtnet_get_phys_port_name(struct net_device *dev, char *buf, return 0; } +static int virtnet_set_features(struct net_device *dev, + netdev_features_t features) +{ + struct virtnet_info *vi = netdev_priv(dev); + u64 offloads; + int err; + + if ((dev->features ^ features) & NETIF_F_LRO) { + if (vi->xdp_queue_pairs) + return -EBUSY; + + if (features & NETIF_F_LRO) + offloads = vi->guest_offloads_capable; + else + offloads = 0; + + err = virtnet_set_guest_offloads(vi, offloads); + if (err) + return err; + vi->guest_offloads = offloads; + } + + return 0; +} + static const struct net_device_ops virtnet_netdev = { .ndo_open = virtnet_open, .ndo_stop = virtnet_close, @@ -2493,6 +2519,7 @@ static const struct 
net_device_ops virtnet_netdev = { .ndo_xdp_xmit = virtnet_xdp_xmit, .ndo_features_check = passthru_features_check, .ndo_get_phys_port_name = virtnet_get_phys_port_name, + .ndo_set_features = virtnet_set_features, }; static void virtnet_config_changed_work(struct work_struct *work) @@ -2951,6 +2978,11 @@ static int virtnet_probe(struct virtio_device *vdev) } if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) dev->features |= NETIF_F_RXCSUM; + if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || + virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)) + dev->features |= NETIF_F_LRO; + if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) + dev->hw_features |= NETIF_F_LRO; dev->vlan_features = dev->features; @@ -3080,6 +3112,7 @@ static int virtnet_probe(struct virtio_device *vdev) for (i = 0; i < ARRAY_SIZE(guest_offloads); i++) if (virtio_has_feature(vi->vdev, guest_offloads[i])) set_bit(guest_offloads[i], &vi->guest_offloads); + vi->guest_offloads_capable = vi->guest_offloads; pr_debug("virtnet: registered device %s with %d RX and TX vq's\n", dev->name, max_queue_pairs); diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c index 69b7227c637e..95909e262ba4 100644 --- a/drivers/net/vrf.c +++ b/drivers/net/vrf.c @@ -747,7 +747,8 @@ static int vrf_rtable_create(struct net_device *dev) /**************************** device handling ********************/ /* cycle interface to flush neighbor cache and move routes across tables */ -static void cycle_netdev(struct net_device *dev) +static void cycle_netdev(struct net_device *dev, + struct netlink_ext_ack *extack) { unsigned int flags = dev->flags; int ret; @@ -755,9 +756,9 @@ static void cycle_netdev(struct net_device *dev) if (!netif_running(dev)) return; - ret = dev_change_flags(dev, flags & ~IFF_UP); + ret = dev_change_flags(dev, flags & ~IFF_UP, extack); if (ret >= 0) - ret = dev_change_flags(dev, flags); + ret = dev_change_flags(dev, flags, extack); if (ret < 0) { netdev_err(dev, @@ -785,7 +786,7 @@ static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev, if (ret < 0) goto err; - cycle_netdev(port_dev); + cycle_netdev(port_dev, extack); return 0; @@ -815,7 +816,7 @@ static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev) netdev_upper_dev_unlink(port_dev, dev); port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE; - cycle_netdev(port_dev); + cycle_netdev(port_dev, NULL); return 0; } @@ -981,24 +982,23 @@ static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev, struct sk_buff *skb) { int orig_iif = skb->skb_iif; - bool need_strict; + bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); + bool is_ndisc = ipv6_ndisc_frame(skb); - /* loopback traffic; do not push through packet taps again. - * Reset pkt_type for upper layers to process skb + /* loopback, multicast & non-ND link-local traffic; do not push through + * packet taps again. 
Reset pkt_type for upper layers to process skb */ - if (skb->pkt_type == PACKET_LOOPBACK) { + if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) { skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; IP6CB(skb)->flags |= IP6SKB_L3SLAVE; - skb->pkt_type = PACKET_HOST; + if (skb->pkt_type == PACKET_LOOPBACK) + skb->pkt_type = PACKET_HOST; goto out; } - /* if packet is NDISC or addressed to multicast or link-local - * then keep the ingress interface - */ - need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr); - if (!ipv6_ndisc_frame(skb) && !need_strict) { + /* if packet is NDISC then keep the ingress interface */ + if (!is_ndisc) { vrf_rx_stats(vrf_dev, skb->len); skb->dev = vrf_dev; skb->skb_iif = vrf_dev->ifindex; diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c index 0565f8880199..5209ee9aac47 100644 --- a/drivers/net/vxlan.c +++ b/drivers/net/vxlan.c @@ -79,9 +79,11 @@ struct vxlan_fdb { u8 eth_addr[ETH_ALEN]; u16 state; /* see ndm_state */ __be32 vni; - u8 flags; /* see ndm_flags */ + u16 flags; /* see ndm_flags and below */ }; +#define NTF_VXLAN_ADDED_BY_USER 0x100 + /* salt for hash table */ static u32 vxlan_salt __read_mostly; @@ -186,7 +188,7 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb) * and enabled unshareable flags. */ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, - __be16 port, u32 flags) + __be16 port, u32 flags, int ifindex) { struct vxlan_sock *vs; @@ -195,7 +197,8 @@ static struct vxlan_sock *vxlan_find_sock(struct net *net, sa_family_t family, hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { if (inet_sk(vs->sock->sk)->inet_sport == port && vxlan_get_sk_family(vs) == family && - vs->flags == flags) + vs->flags == flags && + vs->sock->sk->sk_bound_dev_if == ifindex) return vs; } return NULL; @@ -235,7 +238,7 @@ static struct vxlan_dev *vxlan_find_vni(struct net *net, int ifindex, { struct vxlan_sock *vs; - vs = vxlan_find_sock(net, family, port, flags); + vs = vxlan_find_sock(net, family, port, flags, ifindex); if (!vs) return NULL; @@ -355,6 +358,23 @@ errout: rtnl_set_sk_err(net, RTNLGRP_NEIGH, err); } +static void vxlan_fdb_switchdev_notifier_info(const struct vxlan_dev *vxlan, + const struct vxlan_fdb *fdb, + const struct vxlan_rdst *rd, + struct switchdev_notifier_vxlan_fdb_info *fdb_info) +{ + fdb_info->info.dev = vxlan->dev; + fdb_info->info.extack = NULL; + fdb_info->remote_ip = rd->remote_ip; + fdb_info->remote_port = rd->remote_port; + fdb_info->remote_vni = rd->remote_vni; + fdb_info->remote_ifindex = rd->remote_ifindex; + memcpy(fdb_info->eth_addr, fdb->eth_addr, ETH_ALEN); + fdb_info->vni = fdb->vni; + fdb_info->offloaded = rd->offloaded; + fdb_info->added_by_user = fdb->flags & NTF_VXLAN_ADDED_BY_USER; +} + static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, struct vxlan_rdst *rd, @@ -368,31 +388,25 @@ static void vxlan_fdb_switchdev_call_notifiers(struct vxlan_dev *vxlan, notifier_type = adding ? 
SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE : SWITCHDEV_VXLAN_FDB_DEL_TO_DEVICE; - - info = (struct switchdev_notifier_vxlan_fdb_info){ - .remote_ip = rd->remote_ip, - .remote_port = rd->remote_port, - .remote_vni = rd->remote_vni, - .remote_ifindex = rd->remote_ifindex, - .vni = fdb->vni, - .offloaded = rd->offloaded, - }; - memcpy(info.eth_addr, fdb->eth_addr, ETH_ALEN); - + vxlan_fdb_switchdev_notifier_info(vxlan, fdb, rd, &info); call_switchdev_notifiers(notifier_type, vxlan->dev, &info.info); } static void vxlan_fdb_notify(struct vxlan_dev *vxlan, struct vxlan_fdb *fdb, - struct vxlan_rdst *rd, int type) + struct vxlan_rdst *rd, int type, bool swdev_notify) { - switch (type) { - case RTM_NEWNEIGH: - vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, true); - break; - case RTM_DELNEIGH: - vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, false); - break; + if (swdev_notify) { + switch (type) { + case RTM_NEWNEIGH: + vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, + true); + break; + case RTM_DELNEIGH: + vxlan_fdb_switchdev_call_notifiers(vxlan, fdb, rd, + false); + break; + } } __vxlan_fdb_notify(vxlan, fdb, rd, type); @@ -409,7 +423,7 @@ static void vxlan_ip_miss(struct net_device *dev, union vxlan_addr *ipa) .remote_vni = cpu_to_be32(VXLAN_N_VID), }; - vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true); } static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) @@ -421,7 +435,7 @@ static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN]) memcpy(f.eth_addr, eth_addr, ETH_ALEN); - vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH); + vxlan_fdb_notify(vxlan, &f, &remote, RTM_GETNEIGH, true); } /* Hash Ethernet address */ @@ -531,16 +545,7 @@ int vxlan_fdb_find_uc(struct net_device *dev, const u8 *mac, __be32 vni, } rdst = first_remote_rcu(f); - - memset(fdb_info, 0, sizeof(*fdb_info)); - fdb_info->info.dev = dev; - fdb_info->remote_ip = rdst->remote_ip; - fdb_info->remote_port = rdst->remote_port; - fdb_info->remote_vni = rdst->remote_vni; - fdb_info->remote_ifindex = rdst->remote_ifindex; - fdb_info->vni = vni; - fdb_info->offloaded = rdst->offloaded; - ether_addr_copy(fdb_info->eth_addr, mac); + vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, fdb_info); out: rcu_read_unlock(); @@ -548,6 +553,75 @@ out: } EXPORT_SYMBOL_GPL(vxlan_fdb_find_uc); +static int vxlan_fdb_notify_one(struct notifier_block *nb, + const struct vxlan_dev *vxlan, + const struct vxlan_fdb *f, + const struct vxlan_rdst *rdst) +{ + struct switchdev_notifier_vxlan_fdb_info fdb_info; + int rc; + + vxlan_fdb_switchdev_notifier_info(vxlan, f, rdst, &fdb_info); + rc = nb->notifier_call(nb, SWITCHDEV_VXLAN_FDB_ADD_TO_DEVICE, + &fdb_info); + return notifier_to_errno(rc); +} + +int vxlan_fdb_replay(const struct net_device *dev, __be32 vni, + struct notifier_block *nb) +{ + struct vxlan_dev *vxlan; + struct vxlan_rdst *rdst; + struct vxlan_fdb *f; + unsigned int h; + int rc = 0; + + if (!netif_is_vxlan(dev)) + return -EINVAL; + vxlan = netdev_priv(dev); + + spin_lock_bh(&vxlan->hash_lock); + for (h = 0; h < FDB_HASH_SIZE; ++h) { + hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) { + if (f->vni == vni) { + list_for_each_entry(rdst, &f->remotes, list) { + rc = vxlan_fdb_notify_one(nb, vxlan, + f, rdst); + if (rc) + goto out; + } + } + } + } + +out: + spin_unlock_bh(&vxlan->hash_lock); + return rc; +} +EXPORT_SYMBOL_GPL(vxlan_fdb_replay); + +void vxlan_fdb_clear_offload(const struct net_device *dev, __be32 vni) +{ + struct 
vxlan_dev *vxlan; + struct vxlan_rdst *rdst; + struct vxlan_fdb *f; + unsigned int h; + + if (!netif_is_vxlan(dev)) + return; + vxlan = netdev_priv(dev); + + spin_lock_bh(&vxlan->hash_lock); + for (h = 0; h < FDB_HASH_SIZE; ++h) { + hlist_for_each_entry(f, &vxlan->fdb_head[h], hlist) + if (f->vni == vni) + list_for_each_entry(rdst, &f->remotes, list) + rdst->offloaded = false; + } + spin_unlock_bh(&vxlan->hash_lock); +} +EXPORT_SYMBOL_GPL(vxlan_fdb_clear_offload); + /* Replace destination of unicast mac */ static int vxlan_fdb_replace(struct vxlan_fdb *f, union vxlan_addr *ip, __be16 port, __be32 vni, @@ -701,7 +775,7 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff) static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, const u8 *mac, __u16 state, - __be32 src_vni, __u8 ndm_flags) + __be32 src_vni, __u16 ndm_flags) { struct vxlan_fdb *f; @@ -721,7 +795,7 @@ static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan, static int vxlan_fdb_create(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __be16 port, __be32 src_vni, - __be32 vni, __u32 ifindex, __u8 ndm_flags, + __be32 vni, __u32 ifindex, __u16 ndm_flags, struct vxlan_fdb **fdb) { struct vxlan_rdst *rd = NULL; @@ -757,9 +831,10 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan, const u8 *mac, union vxlan_addr *ip, __u16 state, __u16 flags, __be16 port, __be32 src_vni, __be32 vni, - __u32 ifindex, __u8 ndm_flags) + __u32 ifindex, __u16 ndm_flags, + bool swdev_notify) { - __u8 fdb_flags = (ndm_flags & ~NTF_USE); + __u16 fdb_flags = (ndm_flags & ~NTF_USE); struct vxlan_rdst *rd = NULL; struct vxlan_fdb *f; int notify = 0; @@ -772,16 +847,24 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan, "lost race to create %pM\n", mac); return -EEXIST; } - if (f->state != state) { - f->state = state; - f->updated = jiffies; - notify = 1; - } - if (f->flags != fdb_flags) { - f->flags = fdb_flags; - f->updated = jiffies; - notify = 1; + + /* Do not allow an externally learned entry to take over an + * entry added by the user. 
+ */ + if (!(fdb_flags & NTF_EXT_LEARNED) || + !(f->flags & NTF_VXLAN_ADDED_BY_USER)) { + if (f->state != state) { + f->state = state; + f->updated = jiffies; + notify = 1; + } + if (f->flags != fdb_flags) { + f->flags = fdb_flags; + f->updated = jiffies; + notify = 1; + } } + if ((flags & NLM_F_REPLACE)) { /* Only change unicasts */ if (!(is_multicast_ether_addr(f->eth_addr) || @@ -823,7 +906,7 @@ static int vxlan_fdb_update(struct vxlan_dev *vxlan, if (notify) { if (rd == NULL) rd = first_remote_rtnl(f); - vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH); + vxlan_fdb_notify(vxlan, f, rd, RTM_NEWNEIGH, swdev_notify); } return 0; @@ -842,7 +925,7 @@ static void vxlan_fdb_free(struct rcu_head *head) } static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, - bool do_notify) + bool do_notify, bool swdev_notify) { struct vxlan_rdst *rd; @@ -852,7 +935,8 @@ static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, --vxlan->addrcnt; if (do_notify) list_for_each_entry(rd, &f->remotes, list) - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, + swdev_notify); hlist_del_rcu(&f->hlist); call_rcu(&f->rcu, vxlan_fdb_free); @@ -867,10 +951,10 @@ static void vxlan_dst_free(struct rcu_head *head) } static void vxlan_fdb_dst_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f, - struct vxlan_rdst *rd) + struct vxlan_rdst *rd, bool swdev_notify) { list_del_rcu(&rd->list); - vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH); + vxlan_fdb_notify(vxlan, f, rd, RTM_DELNEIGH, swdev_notify); call_rcu(&rd->rcu, vxlan_dst_free); } @@ -969,7 +1053,9 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], spin_lock_bh(&vxlan->hash_lock); err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags, - port, src_vni, vni, ifindex, ndm->ndm_flags); + port, src_vni, vni, ifindex, + ndm->ndm_flags | NTF_VXLAN_ADDED_BY_USER, + true); spin_unlock_bh(&vxlan->hash_lock); return err; @@ -978,7 +1064,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, const unsigned char *addr, union vxlan_addr ip, __be16 port, __be32 src_vni, __be32 vni, - u32 ifindex, u16 vid) + u32 ifindex, bool swdev_notify) { struct vxlan_fdb *f; struct vxlan_rdst *rd = NULL; @@ -998,11 +1084,11 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan, * otherwise destroy the fdb entry */ if (rd && !list_is_singular(&f->remotes)) { - vxlan_fdb_dst_destroy(vxlan, f, rd); + vxlan_fdb_dst_destroy(vxlan, f, rd, swdev_notify); goto out; } - vxlan_fdb_destroy(vxlan, f, true); + vxlan_fdb_destroy(vxlan, f, true, swdev_notify); out: return 0; @@ -1026,7 +1112,7 @@ static int vxlan_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[], spin_lock_bh(&vxlan->hash_lock); err = __vxlan_fdb_delete(vxlan, addr, ip, port, src_vni, vni, ifindex, - vid); + true); spin_unlock_bh(&vxlan->hash_lock); return err; @@ -1067,6 +1153,39 @@ out: return err; } +static int vxlan_fdb_get(struct sk_buff *skb, + struct nlattr *tb[], + struct net_device *dev, + const unsigned char *addr, + u16 vid, u32 portid, u32 seq, + struct netlink_ext_ack *extack) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f; + __be32 vni; + int err; + + if (tb[NDA_VNI]) + vni = cpu_to_be32(nla_get_u32(tb[NDA_VNI])); + else + vni = vxlan->default_dst.remote_vni; + + rcu_read_lock(); + + f = __vxlan_find_mac(vxlan, addr, vni); + if (!f) { + NL_SET_ERR_MSG(extack, "Fdb entry not found"); + err = -ENOENT; + goto errout; + } + + err = 
vxlan_fdb_info(skb, vxlan, f, portid, seq, + RTM_NEWNEIGH, 0, first_remote_rcu(f)); +errout: + rcu_read_unlock(); + return err; +} + /* Watch incoming packets to learn mapping between Ethernet address * and Tunnel endpoint. * Return true if packet is bogus and should be dropped. @@ -1104,7 +1223,7 @@ static bool vxlan_snoop(struct net_device *dev, rdst->remote_ip = *src_ip; f->updated = jiffies; - vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH); + vxlan_fdb_notify(vxlan, f, rdst, RTM_NEWNEIGH, true); } else { /* learned new entry */ spin_lock(&vxlan->hash_lock); @@ -1117,7 +1236,7 @@ static bool vxlan_snoop(struct net_device *dev, vxlan->cfg.dst_port, vni, vxlan->default_dst.remote_vni, - ifindex, NTF_SELF); + ifindex, NTF_SELF, true); spin_unlock(&vxlan->hash_lock); } @@ -1553,6 +1672,34 @@ drop: return 0; } +/* Callback from net/ipv{4,6}/udp.c to check that we have a VNI for errors */ +static int vxlan_err_lookup(struct sock *sk, struct sk_buff *skb) +{ + struct vxlan_dev *vxlan; + struct vxlan_sock *vs; + struct vxlanhdr *hdr; + __be32 vni; + + if (skb->len < VXLAN_HLEN) + return -EINVAL; + + hdr = vxlan_hdr(skb); + + if (!(hdr->vx_flags & VXLAN_HF_VNI)) + return -EINVAL; + + vs = rcu_dereference_sk_user_data(sk); + if (!vs) + return -ENOENT; + + vni = vxlan_vni(hdr->vx_vni); + vxlan = vxlan_vs_find_vni(vs, skb->dev->ifindex, vni); + if (!vxlan) + return -ENOENT; + + return 0; +} + static int arp_reduce(struct net_device *dev, struct sk_buff *skb, __be32 vni) { struct vxlan_dev *vxlan = netdev_priv(dev); @@ -2241,6 +2388,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, struct rtable *rt; __be16 df = 0; + if (!ifindex) + ifindex = sock4->sock->sk->sk_bound_dev_if; + rt = vxlan_get_route(vxlan, dev, sock4, skb, ifindex, tos, dst->sin.sin_addr.s_addr, &local_ip.sin.sin_addr.s_addr, @@ -2251,13 +2401,24 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto tx_error; } - /* Bypass encapsulation if the destination is local */ if (!info) { + /* Bypass encapsulation if the destination is local */ err = encap_bypass_if_local(skb, dev, vxlan, dst, dst_port, ifindex, vni, &rt->dst, rt->rt_flags); if (err) goto out_unlock; + + if (vxlan->cfg.df == VXLAN_DF_SET) { + df = htons(IP_DF); + } else if (vxlan->cfg.df == VXLAN_DF_INHERIT) { + struct ethhdr *eth = eth_hdr(skb); + + if (ntohs(eth->h_proto) == ETH_P_IPV6 || + (ntohs(eth->h_proto) == ETH_P_IP && + old_iph->frag_off & htons(IP_DF))) + df = htons(IP_DF); + } } else if (info->key.tun_flags & TUNNEL_DONT_FRAGMENT) { df = htons(IP_DF); } @@ -2279,6 +2440,9 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } else { struct vxlan_sock *sock6 = rcu_dereference(vxlan->vn6_sock); + if (!ifindex) + ifindex = sock6->sock->sk->sk_bound_dev_if; + ndst = vxlan6_get_route(vxlan, dev, sock6, skb, ifindex, tos, label, &dst->sin6.sin6_addr, &local_ip.sin6.sin6_addr, @@ -2462,7 +2626,7 @@ static void vxlan_cleanup(struct timer_list *t) "garbage collect %pM\n", f->eth_addr); f->state = NUD_STALE; - vxlan_fdb_destroy(vxlan, f, true); + vxlan_fdb_destroy(vxlan, f, true, true); } else if (time_before(timeout, next_timer)) next_timer = timeout; } @@ -2513,7 +2677,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni) spin_lock_bh(&vxlan->hash_lock); f = __vxlan_find_mac(vxlan, all_zeros_mac, vni); if (f) - vxlan_fdb_destroy(vxlan, f, true); + vxlan_fdb_destroy(vxlan, f, true, true); spin_unlock_bh(&vxlan->hash_lock); } @@ -2567,7 +2731,7 @@ static void 
vxlan_flush(struct vxlan_dev *vxlan, bool do_all) continue; /* the all_zeros_mac entry is deleted at vxlan_uninit */ if (!is_zero_ether_addr(f->eth_addr)) - vxlan_fdb_destroy(vxlan, f, true); + vxlan_fdb_destroy(vxlan, f, true, true); } } spin_unlock_bh(&vxlan->hash_lock); @@ -2675,6 +2839,7 @@ static const struct net_device_ops vxlan_netdev_ether_ops = { .ndo_fdb_add = vxlan_fdb_add, .ndo_fdb_del = vxlan_fdb_delete, .ndo_fdb_dump = vxlan_fdb_dump, + .ndo_fdb_get = vxlan_fdb_get, .ndo_fill_metadata_dst = vxlan_fill_metadata_dst, }; @@ -2810,6 +2975,7 @@ static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = { [IFLA_VXLAN_GPE] = { .type = NLA_FLAG, }, [IFLA_VXLAN_REMCSUM_NOPARTIAL] = { .type = NLA_FLAG }, [IFLA_VXLAN_TTL_INHERIT] = { .type = NLA_FLAG }, + [IFLA_VXLAN_DF] = { .type = NLA_U8 }, }; static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], @@ -2866,6 +3032,16 @@ static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[], } } + if (data[IFLA_VXLAN_DF]) { + enum ifla_vxlan_df df = nla_get_u8(data[IFLA_VXLAN_DF]); + + if (df < 0 || df > VXLAN_DF_MAX) { + NL_SET_ERR_MSG_ATTR(extack, tb[IFLA_VXLAN_DF], + "Invalid DF attribute"); + return -EINVAL; + } + } + return 0; } @@ -2882,7 +3058,7 @@ static const struct ethtool_ops vxlan_ethtool_ops = { }; static struct socket *vxlan_create_sock(struct net *net, bool ipv6, - __be16 port, u32 flags) + __be16 port, u32 flags, int ifindex) { struct socket *sock; struct udp_port_cfg udp_conf; @@ -2900,6 +3076,7 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, } udp_conf.local_udp_port = port; + udp_conf.bind_ifindex = ifindex; /* Open UDP socket */ err = udp_sock_create(net, &udp_conf, &sock); @@ -2911,7 +3088,8 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6, /* Create new listen socket if needed */ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, - __be16 port, u32 flags) + __be16 port, u32 flags, + int ifindex) { struct vxlan_net *vn = net_generic(net, vxlan_net_id); struct vxlan_sock *vs; @@ -2926,7 +3104,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, for (h = 0; h < VNI_HASH_SIZE; ++h) INIT_HLIST_HEAD(&vs->vni_list[h]); - sock = vxlan_create_sock(net, ipv6, port, flags); + sock = vxlan_create_sock(net, ipv6, port, flags, ifindex); if (IS_ERR(sock)) { kfree(vs); return ERR_CAST(sock); @@ -2949,6 +3127,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, bool ipv6, tunnel_cfg.sk_user_data = vs; tunnel_cfg.encap_type = 1; tunnel_cfg.encap_rcv = vxlan_rcv; + tunnel_cfg.encap_err_lookup = vxlan_err_lookup; tunnel_cfg.encap_destroy = NULL; tunnel_cfg.gro_receive = vxlan_gro_receive; tunnel_cfg.gro_complete = vxlan_gro_complete; @@ -2963,11 +3142,17 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); struct vxlan_sock *vs = NULL; struct vxlan_dev_node *node; + int l3mdev_index = 0; + + if (vxlan->cfg.remote_ifindex) + l3mdev_index = l3mdev_master_upper_ifindex_by_index( + vxlan->net, vxlan->cfg.remote_ifindex); if (!vxlan->cfg.no_share) { spin_lock(&vn->sock_lock); vs = vxlan_find_sock(vxlan->net, ipv6 ? 
AF_INET6 : AF_INET, - vxlan->cfg.dst_port, vxlan->cfg.flags); + vxlan->cfg.dst_port, vxlan->cfg.flags, + l3mdev_index); if (vs && !refcount_inc_not_zero(&vs->refcnt)) { spin_unlock(&vn->sock_lock); return -EBUSY; @@ -2976,7 +3161,8 @@ static int __vxlan_sock_add(struct vxlan_dev *vxlan, bool ipv6) } if (!vs) vs = vxlan_socket_create(vxlan->net, ipv6, - vxlan->cfg.dst_port, vxlan->cfg.flags); + vxlan->cfg.dst_port, vxlan->cfg.flags, + l3mdev_index); if (IS_ERR(vs)) return PTR_ERR(vs); #if IS_ENABLED(CONFIG_IPV6) @@ -3293,7 +3479,8 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev, /* notify default fdb entry */ if (f) - vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH); + vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH, + true); list_add(&vxlan->next, &vn->vxlan_list); return 0; @@ -3304,7 +3491,7 @@ errout: * destroy the entry by hand here. */ if (f) - vxlan_fdb_destroy(vxlan, f, false); + vxlan_fdb_destroy(vxlan, f, false, false); if (unregister) unregister_netdevice(dev); return err; @@ -3394,11 +3581,8 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], conf->flags |= VXLAN_F_LEARN; } - if (data[IFLA_VXLAN_AGEING]) { - if (changelink) - return -EOPNOTSUPP; + if (data[IFLA_VXLAN_AGEING]) conf->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]); - } if (data[IFLA_VXLAN_PROXY]) { if (changelink) @@ -3517,6 +3701,9 @@ static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[], conf->mtu = nla_get_u32(tb[IFLA_MTU]); } + if (data[IFLA_VXLAN_DF]) + conf->df = nla_get_u8(data[IFLA_VXLAN_DF]); + return 0; } @@ -3540,6 +3727,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], { struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; + unsigned long old_age_interval; struct vxlan_rdst old_dst; struct vxlan_config conf; int err; @@ -3549,12 +3737,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], if (err) return err; + old_age_interval = vxlan->cfg.age_interval; memcpy(&old_dst, dst, sizeof(struct vxlan_rdst)); err = vxlan_dev_configure(vxlan->net, dev, &conf, true, extack); if (err) return err; + if (old_age_interval != vxlan->cfg.age_interval) + mod_timer(&vxlan->age_timer, jiffies); + /* handle default dst entry */ if (!vxlan_addr_equal(&dst->remote_ip, &old_dst.remote_ip)) { spin_lock_bh(&vxlan->hash_lock); @@ -3564,7 +3756,8 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], vxlan->cfg.dst_port, old_dst.remote_vni, old_dst.remote_vni, - old_dst.remote_ifindex, 0); + old_dst.remote_ifindex, + true); if (!vxlan_addr_any(&dst->remote_ip)) { err = vxlan_fdb_update(vxlan, all_zeros_mac, @@ -3575,7 +3768,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[], dst->remote_vni, dst->remote_vni, dst->remote_ifindex, - NTF_SELF); + NTF_SELF, true); if (err) { spin_unlock_bh(&vxlan->hash_lock); return err; @@ -3608,6 +3801,7 @@ static size_t vxlan_get_size(const struct net_device *dev) nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TTL_INHERIT */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_TOS */ + nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_DF */ nla_total_size(sizeof(__be32)) + /* IFLA_VXLAN_LABEL */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_LEARNING */ nla_total_size(sizeof(__u8)) + /* IFLA_VXLAN_PROXY */ @@ -3674,6 +3868,7 @@ static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev) nla_put_u8(skb, 
IFLA_VXLAN_TTL_INHERIT, !!(vxlan->cfg.flags & VXLAN_F_TTL_INHERIT)) || nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->cfg.tos) || + nla_put_u8(skb, IFLA_VXLAN_DF, vxlan->cfg.df) || nla_put_be32(skb, IFLA_VXLAN_LABEL, vxlan->cfg.label) || nla_put_u8(skb, IFLA_VXLAN_LEARNING, !!(vxlan->cfg.flags & VXLAN_F_LEARN)) || @@ -3756,7 +3951,7 @@ struct net_device *vxlan_dev_create(struct net *net, const char *name, memset(&tb, 0, sizeof(tb)); dev = rtnl_create_link(net, name, name_assign_type, - &vxlan_link_ops, tb); + &vxlan_link_ops, tb, NULL); if (IS_ERR(dev)) return dev; @@ -3851,18 +4046,89 @@ out: spin_unlock_bh(&vxlan->hash_lock); } +static int +vxlan_fdb_external_learn_add(struct net_device *dev, + struct switchdev_notifier_vxlan_fdb_info *fdb_info) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + int err; + + spin_lock_bh(&vxlan->hash_lock); + err = vxlan_fdb_update(vxlan, fdb_info->eth_addr, &fdb_info->remote_ip, + NUD_REACHABLE, + NLM_F_CREATE | NLM_F_REPLACE, + fdb_info->remote_port, + fdb_info->vni, + fdb_info->remote_vni, + fdb_info->remote_ifindex, + NTF_USE | NTF_SELF | NTF_EXT_LEARNED, + false); + spin_unlock_bh(&vxlan->hash_lock); + + return err; +} + +static int +vxlan_fdb_external_learn_del(struct net_device *dev, + struct switchdev_notifier_vxlan_fdb_info *fdb_info) +{ + struct vxlan_dev *vxlan = netdev_priv(dev); + struct vxlan_fdb *f; + int err = 0; + + spin_lock_bh(&vxlan->hash_lock); + + f = vxlan_find_mac(vxlan, fdb_info->eth_addr, fdb_info->vni); + if (!f) + err = -ENOENT; + else if (f->flags & NTF_EXT_LEARNED) + err = __vxlan_fdb_delete(vxlan, fdb_info->eth_addr, + fdb_info->remote_ip, + fdb_info->remote_port, + fdb_info->vni, + fdb_info->remote_vni, + fdb_info->remote_ifindex, + false); + + spin_unlock_bh(&vxlan->hash_lock); + + return err; +} + static int vxlan_switchdev_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = switchdev_notifier_info_to_dev(ptr); + struct switchdev_notifier_vxlan_fdb_info *fdb_info; + int err = 0; switch (event) { case SWITCHDEV_VXLAN_FDB_OFFLOADED: vxlan_fdb_offloaded_set(dev, ptr); break; + case SWITCHDEV_VXLAN_FDB_ADD_TO_BRIDGE: + fdb_info = ptr; + err = vxlan_fdb_external_learn_add(dev, fdb_info); + if (err) { + err = notifier_from_errno(err); + break; + } + fdb_info->offloaded = true; + vxlan_fdb_offloaded_set(dev, fdb_info); + break; + case SWITCHDEV_VXLAN_FDB_DEL_TO_BRIDGE: + fdb_info = ptr; + err = vxlan_fdb_external_learn_del(dev, fdb_info); + if (err) { + err = notifier_from_errno(err); + break; + } + fdb_info->offloaded = false; + vxlan_fdb_offloaded_set(dev, fdb_info); + break; } - return 0; + return err; } static struct notifier_block vxlan_switchdev_notifier_block __read_mostly = { diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 4d6409605207..7a42336c8af8 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -391,6 +391,7 @@ static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev) dev_kfree_skb(skb); return -ENOMEM; } + netdev_sent_queue(dev, skb->len); spin_lock_irqsave(&priv->lock, flags); /* Start from the next BD that should be filled */ @@ -447,6 +448,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) { /* Start from the next BD that should be filled */ struct net_device *dev = priv->ndev; + unsigned int bytes_sent = 0; + int howmany = 0; struct qe_bd *bd; /* BD pointer */ u16 bd_status; int tx_restart = 0; @@ -474,6 +477,8 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) skb = 
priv->tx_skbuff[priv->skb_dirtytx]; if (!skb) break; + howmany++; + bytes_sent += skb->len; dev->stats.tx_packets++; memset(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr), @@ -501,6 +506,7 @@ static int hdlc_tx_done(struct ucc_hdlc_private *priv) if (tx_restart) hdlc_tx_restart(priv); + netdev_completed_queue(dev, howmany, bytes_sent); return 0; } @@ -721,6 +727,7 @@ static int uhdlc_open(struct net_device *dev) priv->hdlc_busy = 1; netif_device_attach(priv->ndev); napi_enable(&priv->napi); + netdev_reset_queue(dev); netif_start_queue(dev); hdlc_open(dev); } @@ -812,6 +819,7 @@ static int uhdlc_close(struct net_device *dev) free_irq(priv->ut_info->uf_info.irq, priv); netif_stop_queue(dev); + netdev_reset_queue(dev); priv->hdlc_busy = 0; return 0; diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig index 166920ae23f8..8c456a66ac3b 100644 --- a/drivers/net/wireless/Kconfig +++ b/drivers/net/wireless/Kconfig @@ -114,4 +114,11 @@ config USB_NET_RNDIS_WLAN If you choose to build a module, it'll be called rndis_wlan. +config VIRT_WIFI + tristate "Wifi wrapper for ethernet drivers" + depends on CFG80211 + ---help--- + This option adds support for ethernet connections to appear as if they + are wifi connections through a special rtnetlink device. + endif # WLAN diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile index 7fc96306712a..6cfe74515c95 100644 --- a/drivers/net/wireless/Makefile +++ b/drivers/net/wireless/Makefile @@ -27,3 +27,5 @@ obj-$(CONFIG_PCMCIA_WL3501) += wl3501_cs.o obj-$(CONFIG_USB_NET_RNDIS_WLAN) += rndis_wlan.o obj-$(CONFIG_MAC80211_HWSIM) += mac80211_hwsim.o + +obj-$(CONFIG_VIRT_WIFI) += virt_wifi.o diff --git a/drivers/net/wireless/ath/ath10k/Kconfig b/drivers/net/wireless/ath/ath10k/Kconfig index e1ad6b9166a6..a7fb5441ced4 100644 --- a/drivers/net/wireless/ath/ath10k/Kconfig +++ b/drivers/net/wireless/ath/ath10k/Kconfig @@ -47,8 +47,7 @@ config ATH10K_SNOC select QCOM_QMI_HELPERS ---help--- This module adds support for integrated WCN3990 chip connected - to system NOC(SNOC). Currently work in progress and will not - fully work. + to system NOC(SNOC). 
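Stepping back to the fsl_ucc_hdlc hunks above: they wire up byte queue limits (BQL) using the standard three-call pattern. A minimal sketch of that pattern in a hypothetical driver (placeholder names, error handling omitted):

#include <linux/netdevice.h>

/* BQL accounting, as added to fsl_ucc_hdlc above: count bytes when a frame
 * is queued to the hardware, report reclaimed packets/bytes in one batch,
 * and reset the counters whenever the TX ring is (re)initialized.
 */
static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	netdev_sent_queue(dev, skb->len);	/* before kicking the hardware */
	/* ... fill a TX descriptor and start transmission ... */
	return NETDEV_TX_OK;
}

static void foo_tx_done(struct net_device *dev)
{
	unsigned int howmany = 0, bytes_sent = 0;

	/* ... for each reclaimed descriptor: howmany++; bytes_sent += len; ... */

	/* one batched completion report lets BQL size the queue limit */
	netdev_completed_queue(dev, howmany, bytes_sent);
}

static int foo_open(struct net_device *dev)
{
	/* sent/completed accounting must restart together, or BQL stalls */
	netdev_reset_queue(dev);
	netif_start_queue(dev);
	return 0;
}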
config ATH10K_DEBUG bool "Atheros ath10k debugging" diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c index d210b0ed59be..399b501f3c3c 100644 --- a/drivers/net/wireless/ath/ath10k/core.c +++ b/drivers/net/wireless/ath/ath10k/core.c @@ -561,6 +561,7 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { .hw_ops = &wcn3990_ops, .decap_align_bytes = 1, .num_peers = TARGET_HL_10_TLV_NUM_PEERS, + .n_cipher_suites = 8, .ast_skid_limit = TARGET_HL_10_TLV_AST_SKID_LIMIT, .num_wds_entries = TARGET_HL_10_TLV_NUM_WDS_ENTRIES, .target_64bit = true, @@ -594,6 +595,7 @@ static const char *const ath10k_core_fw_feature_str[] = { [ATH10K_FW_FEATURE_NO_PS] = "no-ps", [ATH10K_FW_FEATURE_MGMT_TX_BY_REF] = "mgmt-tx-by-reference", [ATH10K_FW_FEATURE_NON_BMI] = "non-bmi", + [ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL] = "single-chan-info-per-channel", }; static unsigned int ath10k_core_get_fw_feature_str(char *buf, @@ -2183,6 +2185,8 @@ static void ath10k_core_restart(struct work_struct *work) if (ret) ath10k_warn(ar, "failed to send firmware crash dump via devcoredump: %d", ret); + + complete(&ar->driver_recovery); } static void ath10k_core_set_coverage_class_work(struct work_struct *work) @@ -3074,6 +3078,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, init_completion(&ar->scan.completed); init_completion(&ar->scan.on_channel); init_completion(&ar->target_suspend); + init_completion(&ar->driver_recovery); init_completion(&ar->wow.wakeup_completed); init_completion(&ar->install_key_done); diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h index 042418097cf9..46e9c8c97a4d 100644 --- a/drivers/net/wireless/ath/ath10k/core.h +++ b/drivers/net/wireless/ath/ath10k/core.h @@ -474,6 +474,7 @@ struct ath10k_htt_data_stats { u64 bw[ATH10K_COUNTER_TYPE_MAX][ATH10K_BW_NUM]; u64 nss[ATH10K_COUNTER_TYPE_MAX][ATH10K_NSS_NUM]; u64 gi[ATH10K_COUNTER_TYPE_MAX][ATH10K_GI_NUM]; + u64 rate_table[ATH10K_COUNTER_TYPE_MAX][ATH10K_RATE_TABLE_NUM]; }; struct ath10k_htt_tx_stats { @@ -493,6 +494,7 @@ struct ath10k_sta { u32 smps; u16 peer_id; struct rate_info txrate; + struct ieee80211_tx_info tx_info; struct work_struct update_wk; u64 rx_duration; @@ -760,6 +762,9 @@ enum ath10k_fw_features { /* Firmware load is done externally, not by bmi */ ATH10K_FW_FEATURE_NON_BMI = 19, + /* Firmware sends only one chan_info event per channel */ + ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL = 20, + /* keep last */ ATH10K_FW_FEATURE_COUNT, }; @@ -960,6 +965,7 @@ struct ath10k { } hif; struct completion target_suspend; + struct completion driver_recovery; const struct ath10k_hw_regs *regs; const struct ath10k_hw_ce_regs *hw_ce_regs; diff --git a/drivers/net/wireless/ath/ath10k/coredump.c b/drivers/net/wireless/ath/ath10k/coredump.c index 4d28063052fe..eadae2f9206b 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.c +++ b/drivers/net/wireless/ath/ath10k/coredump.c @@ -867,9 +867,105 @@ static const struct ath10k_mem_region qca9984_hw10_mem_regions[] = { }, }; +static const struct ath10k_mem_section ipq4019_soc_reg_range[] = { + {0x080000, 0x080004}, + {0x080020, 0x080024}, + {0x080028, 0x080050}, + {0x0800d4, 0x0800ec}, + {0x08010c, 0x080118}, + {0x080284, 0x080290}, + {0x0802a8, 0x0802b8}, + {0x0802dc, 0x08030c}, + {0x082000, 0x083fff} +}; + +static const struct ath10k_mem_region qca4019_hw10_mem_regions[] = { + { + .type = ATH10K_MEM_REGION_TYPE_DRAM, + .start = 0x400000, + .len = 0x68000, + .name = "DRAM", + 
.section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0xC0000, + .len = 0x40000, + .name = "SRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x98000, + .len = 0x50000, + .name = "IRAM", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x30000, + .len = 0x7000, + .name = "APB REG 1", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x3f000, + .len = 0x3000, + .name = "APB REG 2", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x43000, + .len = 0x3000, + .name = "WIFI REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_IOREG, + .start = 0x4A000, + .len = 0x5000, + .name = "CE REG", + .section_table = { + .sections = NULL, + .size = 0, + }, + }, + { + .type = ATH10K_MEM_REGION_TYPE_REG, + .start = 0x080000, + .len = 0x083fff - 0x080000, + .name = "REG_TOTAL", + .section_table = { + .sections = ipq4019_soc_reg_range, + .size = ARRAY_SIZE(ipq4019_soc_reg_range), + }, + }, +}; + static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { { .hw_id = QCA6174_HW_1_0_VERSION, + .hw_rev = ATH10K_HW_QCA6174, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), @@ -877,6 +973,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA6174_HW_1_1_VERSION, + .hw_rev = ATH10K_HW_QCA6174, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), @@ -884,6 +981,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA6174_HW_1_3_VERSION, + .hw_rev = ATH10K_HW_QCA6174, .region_table = { .regions = qca6174_hw10_mem_regions, .size = ARRAY_SIZE(qca6174_hw10_mem_regions), @@ -891,6 +989,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA6174_HW_2_1_VERSION, + .hw_rev = ATH10K_HW_QCA6174, .region_table = { .regions = qca6174_hw21_mem_regions, .size = ARRAY_SIZE(qca6174_hw21_mem_regions), @@ -898,6 +997,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA6174_HW_3_0_VERSION, + .hw_rev = ATH10K_HW_QCA6174, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), @@ -905,6 +1005,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA6174_HW_3_2_VERSION, + .hw_rev = ATH10K_HW_QCA6174, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), @@ -912,6 +1013,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA9377_HW_1_1_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA9377, .region_table = { .regions = qca6174_hw30_mem_regions, .size = ARRAY_SIZE(qca6174_hw30_mem_regions), @@ -919,6 +1021,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA988X_HW_2_0_VERSION, + .hw_rev = ATH10K_HW_QCA988X, .region_table = { .regions = qca988x_hw20_mem_regions, .size = ARRAY_SIZE(qca988x_hw20_mem_regions), @@ -926,6 +1029,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA9984_HW_1_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA9984, .region_table = { .regions = qca9984_hw10_mem_regions, .size = ARRAY_SIZE(qca9984_hw10_mem_regions), @@ 
-933,6 +1037,7 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA9888_HW_2_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA9888, .region_table = { .regions = qca9984_hw10_mem_regions, .size = ARRAY_SIZE(qca9984_hw10_mem_regions), @@ -940,12 +1045,20 @@ static const struct ath10k_hw_mem_layout hw_mem_layouts[] = { }, { .hw_id = QCA99X0_HW_2_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA99X0, .region_table = { .regions = qca99x0_hw20_mem_regions, .size = ARRAY_SIZE(qca99x0_hw20_mem_regions), }, }, - + { + .hw_id = QCA4019_HW_1_0_DEV_VERSION, + .hw_rev = ATH10K_HW_QCA4019, + .region_table = { + .regions = qca4019_hw10_mem_regions, + .size = ARRAY_SIZE(qca4019_hw10_mem_regions), + }, + }, }; static u32 ath10k_coredump_get_ramdump_size(struct ath10k *ar) @@ -987,7 +1100,8 @@ const struct ath10k_hw_mem_layout *ath10k_coredump_get_mem_layout(struct ath10k return NULL; for (i = 0; i < ARRAY_SIZE(hw_mem_layouts); i++) { - if (ar->target_version == hw_mem_layouts[i].hw_id) + if (ar->target_version == hw_mem_layouts[i].hw_id && + ar->hw_rev == hw_mem_layouts[i].hw_rev) return &hw_mem_layouts[i]; } diff --git a/drivers/net/wireless/ath/ath10k/coredump.h b/drivers/net/wireless/ath/ath10k/coredump.h index 3baaf9d2cbcd..5dac653e1649 100644 --- a/drivers/net/wireless/ath/ath10k/coredump.h +++ b/drivers/net/wireless/ath/ath10k/coredump.h @@ -165,6 +165,7 @@ struct ath10k_mem_region { */ struct ath10k_hw_mem_layout { u32 hw_id; + u32 hw_rev; struct { const struct ath10k_mem_region *regions; diff --git a/drivers/net/wireless/ath/ath10k/debugfs_sta.c b/drivers/net/wireless/ath/ath10k/debugfs_sta.c index b09cdc699c69..4778a455d81a 100644 --- a/drivers/net/wireless/ath/ath10k/debugfs_sta.c +++ b/drivers/net/wireless/ath/ath10k/debugfs_sta.c @@ -71,7 +71,7 @@ void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) + if (!peer || !peer->sta) goto out; arsta = (struct ath10k_sta *)peer->sta->drv_priv; @@ -665,7 +665,7 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file, "retry", "ampdu"}; const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"}; int len = 0, i, j, k, retval = 0; - const int size = 2 * 4096; + const int size = 16 * 4096; char *buf; buf = kzalloc(size, GFP_KERNEL); @@ -719,6 +719,16 @@ static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file, len += scnprintf(buf + len, size - len, "%llu ", stats->legacy[j][i]); len += scnprintf(buf + len, size - len, "\n"); + len += scnprintf(buf + len, size - len, + " Rate table %s (1,2 ... 
Mbps)\n ", + str[j]); + for (i = 0; i < ATH10K_RATE_TABLE_NUM; i++) { + len += scnprintf(buf + len, size - len, "%llu ", + stats->rate_table[j][i]); + if (!((i + 1) % 8)) + len += + scnprintf(buf + len, size - len, "\n "); + } } } diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c index ffec98f7be50..f42bac204ef8 100644 --- a/drivers/net/wireless/ath/ath10k/htt_rx.c +++ b/drivers/net/wireless/ath/ath10k/htt_rx.c @@ -469,6 +469,166 @@ static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt, return msdu; } +static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head, + struct sk_buff *frag_list, + unsigned int frag_len) +{ + skb_shinfo(skb_head)->frag_list = frag_list; + skb_head->data_len = frag_len; + skb_head->len += skb_head->data_len; +} + +static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt, + struct sk_buff *msdu, + struct htt_rx_in_ord_msdu_desc **msdu_desc) +{ + struct ath10k *ar = htt->ar; + u32 paddr; + struct sk_buff *frag_buf; + struct sk_buff *prev_frag_buf; + u8 last_frag; + struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc; + struct htt_rx_desc *rxd; + int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); + + rxd = (void *)msdu->data; + trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); + + skb_put(msdu, sizeof(struct htt_rx_desc)); + skb_pull(msdu, sizeof(struct htt_rx_desc)); + skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE)); + amsdu_len -= msdu->len; + + last_frag = ind_desc->reserved; + if (last_frag) { + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), + amsdu_len); + } + return 0; + } + + ind_desc++; + paddr = __le32_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr); + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); + + amsdu_len -= frag_buf->len; + prev_frag_buf = frag_buf; + last_frag = ind_desc->reserved; + while (!last_frag) { + ind_desc++; + paddr = __le32_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x", + paddr); + prev_frag_buf->next = NULL; + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + last_frag = ind_desc->reserved; + amsdu_len -= frag_buf->len; + + prev_frag_buf->next = frag_buf; + prev_frag_buf = frag_buf; + } + + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), amsdu_len); + } + + *msdu_desc = ind_desc; + + prev_frag_buf->next = NULL; + return 0; +} + +static int +ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt, + struct sk_buff *msdu, + struct htt_rx_in_ord_msdu_desc_ext **msdu_desc) +{ + struct ath10k *ar = htt->ar; + u64 paddr; + struct sk_buff *frag_buf; + struct sk_buff *prev_frag_buf; + u8 last_frag; + struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc; + struct htt_rx_desc *rxd; + int amsdu_len = __le16_to_cpu(ind_desc->msdu_len); + + rxd = (void *)msdu->data; + trace_ath10k_htt_rx_desc(ar, rxd, sizeof(*rxd)); + + skb_put(msdu, sizeof(struct htt_rx_desc)); + skb_pull(msdu, sizeof(struct htt_rx_desc)); + skb_put(msdu, min(amsdu_len, HTT_RX_MSDU_SIZE)); + amsdu_len -= msdu->len; + + last_frag = ind_desc->reserved; + if (last_frag) { + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + 
__le16_to_cpu(ind_desc->msdu_len), + amsdu_len); + } + return 0; + } + + ind_desc++; + paddr = __le64_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr); + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len); + + amsdu_len -= frag_buf->len; + prev_frag_buf = frag_buf; + last_frag = ind_desc->reserved; + while (!last_frag) { + ind_desc++; + paddr = __le64_to_cpu(ind_desc->msdu_paddr); + frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr); + if (!frag_buf) { + ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx", + paddr); + prev_frag_buf->next = NULL; + return -ENOENT; + } + + skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE)); + last_frag = ind_desc->reserved; + amsdu_len -= frag_buf->len; + + prev_frag_buf->next = frag_buf; + prev_frag_buf = frag_buf; + } + + if (amsdu_len) { + ath10k_warn(ar, "invalid amsdu len %u, left %d", + __le16_to_cpu(ind_desc->msdu_len), amsdu_len); + } + + *msdu_desc = ind_desc; + + prev_frag_buf->next = NULL; + return 0; +} + static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, struct htt_rx_in_ord_ind *ev, struct sk_buff_head *list) @@ -477,7 +637,7 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32; struct htt_rx_desc *rxd; struct sk_buff *msdu; - int msdu_count; + int msdu_count, ret; bool is_offload; u32 paddr; @@ -495,6 +655,18 @@ static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt, return -ENOENT; } + if (!is_offload && ar->monitor_arvif) { + ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu, + &msdu_desc); + if (ret) { + __skb_queue_purge(list); + return ret; + } + __skb_queue_tail(list, msdu); + msdu_desc++; + continue; + } + __skb_queue_tail(list, msdu); if (!is_offload) { @@ -527,7 +699,7 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64; struct htt_rx_desc *rxd; struct sk_buff *msdu; - int msdu_count; + int msdu_count, ret; bool is_offload; u64 paddr; @@ -544,6 +716,18 @@ static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt, return -ENOENT; } + if (!is_offload && ar->monitor_arvif) { + ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu, + &msdu_desc); + if (ret) { + __skb_queue_purge(list); + return ret; + } + __skb_queue_tail(list, msdu); + msdu_desc++; + continue; + } + __skb_queue_tail(list, msdu); if (!is_offload) { @@ -1159,7 +1343,8 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, struct sk_buff *msdu, struct ieee80211_rx_status *status, enum htt_rx_mpdu_encrypt_type enctype, - bool is_decrypted) + bool is_decrypted, + const u8 first_hdr[64]) { struct ieee80211_hdr *hdr; struct htt_rx_desc *rxd; @@ -1167,6 +1352,9 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, size_t crypto_len; bool is_first; bool is_last; + bool msdu_limit_err; + int bytes_aligned = ar->hw_params.decap_align_bytes; + u8 *qos; rxd = (void *)msdu->data - sizeof(*rxd); is_first = !!(rxd->msdu_end.common.info0 & @@ -1184,16 +1372,45 @@ static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar, * [FCS] <-- at end, needs to be trimmed */ + /* Some hardware (QCA99x0 variants) limits the number of MSDUs in an A-MSDU + * when deaggregating, so that unwanted MSDU deaggregation is avoided for + * error packets.
If the limit is exceeded, the hw sends all remaining MSDUs as + a single last MSDU with the msdu limit error set. + */ + msdu_limit_err = ath10k_rx_desc_msdu_limit_error(&ar->hw_params, rxd); + + /* If an MSDU limit error occurs, don't warn: a partial raw MSDU + * without the first MSDU is expected in that case, and is handled later here. + */ /* This probably shouldn't happen but warn just in case */ - if (WARN_ON_ONCE(!is_first)) + if (WARN_ON_ONCE(!is_first && !msdu_limit_err)) return; /* This probably shouldn't happen but warn just in case */ - if (WARN_ON_ONCE(!(is_first && is_last))) + if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err)) return; skb_trim(msdu, msdu->len - FCS_LEN); + /* Push original 80211 header */ + if (unlikely(msdu_limit_err)) { + hdr = (struct ieee80211_hdr *)first_hdr; + hdr_len = ieee80211_hdrlen(hdr->frame_control); + crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype); + + if (ieee80211_is_data_qos(hdr->frame_control)) { + qos = ieee80211_get_qos_ctl(hdr); + qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT; + } + + if (crypto_len) + memcpy(skb_push(msdu, crypto_len), + (void *)hdr + round_up(hdr_len, bytes_aligned), + crypto_len); + + memcpy(skb_push(msdu, hdr_len), hdr, hdr_len); + } + /* In most cases this will be true for sniffed frames. It makes sense * to deliver them as-is without stripping the crypto param. This is * necessary for software based decryption. @@ -1467,7 +1684,7 @@ static void ath10k_htt_rx_h_undecap(struct ath10k *ar, switch (decap) { case RX_MSDU_DECAP_RAW: ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype, - is_decrypted); + is_decrypted, first_hdr); break; case RX_MSDU_DECAP_NATIVE_WIFI: ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr, @@ -2627,7 +2844,7 @@ void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb) dev_kfree_skb_any(skb); } -static inline int ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) +static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate) { static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12, 18, 24, 36, 48, 54}; @@ -2646,11 +2863,11 @@ static void ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, struct ath10k_sta *arsta, struct ath10k_per_peer_tx_stats *pstats, - u8 legacy_rate_idx) + s8 legacy_rate_idx) { struct rate_info *txrate = &arsta->txrate; struct ath10k_htt_tx_stats *tx_stats; - int ht_idx, gi, mcs, bw, nss; + int idx, ht_idx, gi, mcs, bw, nss; if (!arsta->tx_stats) return; @@ -2661,6 +2878,8 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, mcs = txrate->mcs; bw = txrate->bw; nss = txrate->nss; + idx = mcs * 8 + 8 * 10 * nss; + idx += bw * 2 + gi; #define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name] @@ -2709,12 +2928,16 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, pstats->succ_bytes + pstats->retry_bytes; STATS_OP_FMT(AMPDU).gi[0][gi] += pstats->succ_bytes + pstats->retry_bytes; + STATS_OP_FMT(AMPDU).rate_table[0][idx] += + pstats->succ_bytes + pstats->retry_bytes; STATS_OP_FMT(AMPDU).bw[1][bw] += pstats->succ_pkts + pstats->retry_pkts; STATS_OP_FMT(AMPDU).nss[1][nss] += pstats->succ_pkts + pstats->retry_pkts; STATS_OP_FMT(AMPDU).gi[1][gi] += pstats->succ_pkts + pstats->retry_pkts; + STATS_OP_FMT(AMPDU).rate_table[1][idx] += + pstats->succ_pkts + pstats->retry_pkts; } else { tx_stats->ack_fails += ATH10K_HW_BA_FAIL(pstats->flags); @@ -2743,6 +2966,15 @@ ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar, STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts; STATS_OP_FMT(RETRY).nss[1][nss] +=
pstats->retry_pkts; STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts; + + if (txrate->flags >= RATE_INFO_FLAGS_MCS) { + STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes; + STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts; + STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes; + STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts; + STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes; + STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts; + } } static void @@ -2751,8 +2983,10 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, struct ath10k_per_peer_tx_stats *peer_stats) { struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; + struct ieee80211_chanctx_conf *conf = NULL; u8 rate = 0, sgi; s8 rate_idx = 0; + bool skip_auto_rate; struct rate_info txrate; lockdep_assert_held(&ar->data_lock); @@ -2762,6 +2996,13 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode); txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode); sgi = ATH10K_HW_GI(peer_stats->flags); + skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags); + + /* Firmware's rate control skips broadcast/management frames + * if the host has configured fixed rates, and in some other special cases. + */ + if (skip_auto_rate) + return; if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) { ath10k_warn(ar, "Invalid VHT mcs %hhd peer stats", txrate.mcs); @@ -2776,7 +3017,7 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, } memset(&arsta->txrate, 0, sizeof(arsta->txrate)); - + memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status)); if (txrate.flags == WMI_RATE_PREAMBLE_CCK || txrate.flags == WMI_RATE_PREAMBLE_OFDM) { rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode); @@ -2795,11 +3036,59 @@ ath10k_update_per_peer_tx_stats(struct ath10k *ar, arsta->txrate.mcs = txrate.mcs; } - if (sgi) - arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + switch (txrate.flags) { + case WMI_RATE_PREAMBLE_OFDM: + if (arsta->arvif && arsta->arvif->vif) + conf = rcu_dereference(arsta->arvif->vif->chanctx_conf); + if (conf && conf->def.chan->band == NL80211_BAND_5GHZ) + arsta->tx_info.status.rates[0].idx = rate_idx - 4; + break; + case WMI_RATE_PREAMBLE_CCK: + arsta->tx_info.status.rates[0].idx = rate_idx; + if (sgi) + arsta->tx_info.status.rates[0].flags |= + (IEEE80211_TX_RC_USE_SHORT_PREAMBLE | + IEEE80211_TX_RC_SHORT_GI); + break; + case WMI_RATE_PREAMBLE_HT: + arsta->tx_info.status.rates[0].idx = + txrate.mcs + ((txrate.nss - 1) * 8); + if (sgi) + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS; + break; + case WMI_RATE_PREAMBLE_VHT: + ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0], + txrate.mcs, txrate.nss); + if (sgi) + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_SHORT_GI; + arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS; + break; + } arsta->txrate.nss = txrate.nss; arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw); + if (sgi) + arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; + + switch (arsta->txrate.bw) { + case RATE_INFO_BW_40: + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_40_MHZ_WIDTH; + break; + case RATE_INFO_BW_80: + arsta->tx_info.status.rates[0].flags |= + IEEE80211_TX_RC_80_MHZ_WIDTH; + break; + } + + if (peer_stats->succ_pkts) { + arsta->tx_info.flags = IEEE80211_TX_STAT_ACK; + arsta->tx_info.status.rates[0].count = 1; + ieee80211_tx_rate_update(ar->hw, sta,
&arsta->tx_info); + } if (ath10k_debug_is_extd_tx_stats_enabled(ar)) ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats, @@ -2832,7 +3121,7 @@ static void ath10k_htt_fetch_peer_stats(struct ath10k *ar, rcu_read_lock(); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n", peer_id); goto out; @@ -2885,7 +3174,7 @@ static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data) rcu_read_lock(); spin_lock_bh(&ar->data_lock); peer = ath10k_peer_find_by_id(ar, peer_id); - if (!peer) { + if (!peer || !peer->sta) { ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n", peer_id); goto out; } diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c index af8ae8117c62..61ecf931ba4d 100644 --- a/drivers/net/wireless/ath/ath10k/hw.c +++ b/drivers/net/wireless/ath/ath10k/hw.c @@ -1119,8 +1119,15 @@ static int ath10k_qca99x0_rx_desc_get_l3_pad_bytes(struct htt_rx_desc *rxd) RX_MSDU_END_INFO1_L3_HDR_PAD); } +static bool ath10k_qca99x0_rx_desc_msdu_limit_error(struct htt_rx_desc *rxd) +{ + return !!(rxd->msdu_end.common.info0 & + __cpu_to_le32(RX_MSDU_END_INFO0_MSDU_LIMIT_ERR)); +} + const struct ath10k_hw_ops qca99x0_ops = { .rx_desc_get_l3_pad_bytes = ath10k_qca99x0_rx_desc_get_l3_pad_bytes, + .rx_desc_get_msdu_limit_error = ath10k_qca99x0_rx_desc_msdu_limit_error, }; const struct ath10k_hw_ops qca6174_ops = { diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h index 1b5da272d18c..e50a8dc5b093 100644 --- a/drivers/net/wireless/ath/ath10k/hw.h +++ b/drivers/net/wireless/ath/ath10k/hw.h @@ -624,6 +624,7 @@ struct ath10k_hw_ops { int (*rx_desc_get_l3_pad_bytes)(struct htt_rx_desc *rxd); void (*set_coverage_class)(struct ath10k *ar, s16 value); int (*enable_pll_clk)(struct ath10k *ar); + bool (*rx_desc_get_msdu_limit_error)(struct htt_rx_desc *rxd); }; extern const struct ath10k_hw_ops qca988x_ops; @@ -642,6 +643,15 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw, return 0; } +static inline bool +ath10k_rx_desc_msdu_limit_error(struct ath10k_hw_params *hw, + struct htt_rx_desc *rxd) +{ + if (hw->hw_ops->rx_desc_get_msdu_limit_error) + return hw->hw_ops->rx_desc_get_msdu_limit_error(rxd); + return false; +} + /* Target specific defines for MAIN firmware */ #define TARGET_NUM_VDEVS 8 #define TARGET_NUM_PEER_AST 2 diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c index 7e49342bae38..e49b36752ba2 100644 --- a/drivers/net/wireless/ath/ath10k/mac.c +++ b/drivers/net/wireless/ath/ath10k/mac.c @@ -22,6 +22,7 @@ #include <net/mac80211.h> #include <linux/etherdevice.h> #include <linux/acpi.h> +#include <linux/of.h> #include "hif.h" #include "core.h" @@ -4637,11 +4638,44 @@ static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) return ret; } +static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar, + struct wmi_bb_timing_cfg_arg *bb_timing) +{ + struct device_node *node; + const char *fem_name; + int ret; + + node = ar->dev->of_node; + if (!node) + return -ENOENT; + + ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name); + if (ret) + return -ENOENT; + + /* + * If an external front-end module is used in the hardware, the default + * baseband timing parameters cannot be used, since they were fine-tuned + * for the reference hardware, so choose different values suitable for + * that external FEM.
+ */ + if (!strcmp("microsemi-lx5586", fem_name)) { + bb_timing->bb_tx_timing = 0x00; + bb_timing->bb_xpa_timing = 0x0101; + } else { + return -ENOENT; + } + + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", + bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing); + return 0; +} + static int ath10k_start(struct ieee80211_hw *hw) { struct ath10k *ar = hw->priv; u32 param; int ret = 0; + struct wmi_bb_timing_cfg_arg bb_timing = {0}; /* * This makes sense only when restarting hw. It is harmless to call @@ -4796,6 +4830,19 @@ static int ath10k_start(struct ieee80211_hw *hw) clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); } + if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) { + ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing); + if (!ret) { + ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing); + if (ret) { + ath10k_warn(ar, + "failed to set bb timings: %d\n", + ret); + goto err_core_stop; + } + } + } + ar->num_started_vdevs = 0; ath10k_regd_update(ar); @@ -5154,6 +5201,17 @@ static int ath10k_add_interface(struct ieee80211_hw *hw, goto err; } + if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + ar->wmi.svc_map)) { + vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn; + ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, + WMI_VDEV_DISABLE_4_ADDR_SRC_LRN); + if (ret && ret != -EOPNOTSUPP) { + ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n", + arvif->vdev_id, ret); + } + } + ar->free_vdev_map &= ~(1LL << arvif->vdev_id); spin_lock_bh(&ar->data_lock); list_add(&arvif->list, &ar->arvifs); @@ -5754,30 +5812,6 @@ static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, return data.num_tdls_stations; } -static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, - struct ieee80211_vif *vif) -{ - struct ath10k_vif *arvif = (void *)vif->drv_priv; - int *num_tdls_vifs = data; - - if (vif->type != NL80211_IFTYPE_STATION) - return; - - if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) - (*num_tdls_vifs)++; -} - -static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) -{ - int num_tdls_vifs = 0; - - ieee80211_iterate_active_interfaces_atomic(hw, - IEEE80211_IFACE_ITER_NORMAL, - ath10k_mac_tdls_vifs_count_iter, - &num_tdls_vifs); - return num_tdls_vifs; -} - static int ath10k_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) @@ -6285,7 +6319,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, */ enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; u32 num_tdls_stations; - u32 num_tdls_vifs; ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", @@ -6293,15 +6326,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ar->num_stations + 1, ar->max_num_stations, ar->num_peers + 1, ar->max_num_peers); - if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { - arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), - GFP_KERNEL); - if (!arsta->tx_stats) - goto exit; - } - num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); - num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); if (sta->tdls) { if (num_tdls_stations >= ar->max_num_tdls_vdevs) { @@ -6321,12 +6346,22 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, goto exit; } + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { + arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats), + GFP_KERNEL); + if (!arsta->tx_stats) { + ret = -ENOMEM; + goto exit; + } + } + ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, sta->addr, 
peer_type); if (ret) { ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", sta->addr, arvif->vdev_id, ret); ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); goto exit; } @@ -6339,6 +6374,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, spin_unlock_bh(&ar->data_lock); ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); ret = -ENOENT; goto exit; } @@ -6359,6 +6395,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); goto exit; } @@ -6370,6 +6407,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, sta->addr, arvif->vdev_id, ret); ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); ath10k_mac_dec_num_stations(arvif, sta); + kfree(arsta->tx_stats); if (num_tdls_stations != 0) goto exit; @@ -6385,9 +6423,6 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, "mac vdev %d peer delete %pM sta %pK (sta gone)\n", arvif->vdev_id, sta->addr, sta); - if (ath10k_debug_is_extd_tx_stats_enabled(ar)) - kfree(arsta->tx_stats); - if (sta->tdls) { ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, @@ -6427,6 +6462,11 @@ static int ath10k_sta_state(struct ieee80211_hw *hw, } spin_unlock_bh(&ar->data_lock); + if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { + kfree(arsta->tx_stats); + arsta->tx_stats = NULL; + } + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) ath10k_mac_txq_unref(ar, sta->txq[i]); @@ -8313,7 +8353,6 @@ static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) { - struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev); acpi_handle root_handle; acpi_handle handle; struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL}; @@ -8321,7 +8360,7 @@ static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd) u32 alpha2_code; char alpha2[3]; - root_handle = ACPI_HANDLE(&pdev->dev); + root_handle = ACPI_HANDLE(ar->dev); if (!root_handle) return -EOPNOTSUPP; diff --git a/drivers/net/wireless/ath/ath10k/qmi.c b/drivers/net/wireless/ath/ath10k/qmi.c index 56cb1831dcdf..37b3bd629f48 100644 --- a/drivers/net/wireless/ath/ath10k/qmi.c +++ b/drivers/net/wireless/ath/ath10k/qmi.c @@ -543,7 +543,7 @@ static int ath10k_qmi_cap_send_sync_msg(struct ath10k_qmi *qmi) goto out; if (resp->resp.result != QMI_RESULT_SUCCESS_V01) { - ath10k_err(ar, "capablity req rejected: %d\n", resp->resp.error); + ath10k_err(ar, "capability req rejected: %d\n", resp->resp.error); ret = -EINVAL; goto out; } @@ -623,7 +623,7 @@ static int ath10k_qmi_host_cap_send_sync(struct ath10k_qmi *qmi) goto out; } - ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capablity request completed\n"); + ath10k_dbg(ar, ATH10K_DBG_QMI, "qmi host capability request completed\n"); return 0; out: @@ -657,7 +657,7 @@ ath10k_qmi_ind_register_send_sync_msg(struct ath10k_qmi *qmi) wlfw_ind_register_req_msg_v01_ei, &req); if (ret < 0) { qmi_txn_cancel(&txn); - ath10k_err(ar, "failed to send indication registed request: %d\n", ret); + ath10k_err(ar, "failed to send indication registered request: %d\n", ret); goto out; } @@ -931,9 +931,9 @@ static int ath10k_qmi_setup_msa_resources(struct ath10k_qmi *qmi, u32 msa_size) qmi->msa_mem_size = resource_size(&r); qmi->msa_va = devm_memremap(dev, qmi->msa_pa, qmi->msa_mem_size, MEMREMAP_WT); - if (!qmi->msa_pa) { + if (IS_ERR(qmi->msa_va)) { dev_err(dev, "failed to map memory 
region: %pa\n", &r.start); - return -EBUSY; + return PTR_ERR(qmi->msa_va); } } else { qmi->msa_va = dmam_alloc_coherent(dev, msa_size, diff --git a/drivers/net/wireless/ath/ath10k/rx_desc.h b/drivers/net/wireless/ath/ath10k/rx_desc.h index 310674de3cb8..dfbfe674e11e 100644 --- a/drivers/net/wireless/ath/ath10k/rx_desc.h +++ b/drivers/net/wireless/ath/ath10k/rx_desc.h @@ -572,6 +572,7 @@ struct rx_msdu_start { #define RX_MSDU_END_INFO0_REPORTED_MPDU_LENGTH_LSB 0 #define RX_MSDU_END_INFO0_FIRST_MSDU BIT(14) #define RX_MSDU_END_INFO0_LAST_MSDU BIT(15) +#define RX_MSDU_END_INFO0_MSDU_LIMIT_ERR BIT(18) #define RX_MSDU_END_INFO0_PRE_DELIM_ERR BIT(30) #define RX_MSDU_END_INFO0_RESERVED_3B BIT(31) @@ -676,6 +677,12 @@ struct rx_msdu_end { * Indicates the last MSDU of the A-MSDU. MPDU end status is * only valid when last_msdu is set. * + *msdu_limit_error + * Indicates that the MSDU threshold was exceeded and thus + * all the rest of the MSDUs will not be scattered and + * will not be decapsulated but will be received in RAW format + * as a single MSDU buffer. + * *reserved_3a * Reserved: HW should fill with zero. FW should ignore. * diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c index 8d3d9bca410f..54efe6be8f1d 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.c +++ b/drivers/net/wireless/ath/ath10k/snoc.c @@ -46,14 +46,14 @@ static char *const ce_name[] = { "WLAN_CE_11", }; -static struct ath10k_wcn3990_vreg_info vreg_cfg[] = { - {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false}, - {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false}, - {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false}, - {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false}, +static struct ath10k_vreg_info vreg_cfg[] = { + {NULL, "vdd-0.8-cx-mx", 800000, 850000, 0, 0, false}, + {NULL, "vdd-1.8-xo", 1800000, 1850000, 0, 0, false}, + {NULL, "vdd-1.3-rfa", 1300000, 1350000, 0, 0, false}, + {NULL, "vdd-3.3-ch0", 3300000, 3350000, 0, 0, false}, }; -static struct ath10k_wcn3990_clk_info clk_cfg[] = { +static struct ath10k_clk_info clk_cfg[] = { {NULL, "cxo_ref_clk_pin", 0, false}, }; @@ -474,14 +474,14 @@ static struct service_to_pipe target_service_to_ce_map_wlan[] = { }, }; -void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) +static void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); iowrite32(value, ar_snoc->mem + offset); } -u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) +static u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); u32 val; @@ -918,7 +918,9 @@ static void ath10k_snoc_buffer_cleanup(struct ath10k *ar) static void ath10k_snoc_hif_stop(struct ath10k *ar) { - ath10k_snoc_irq_disable(ar); + if (!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ath10k_snoc_irq_disable(ar); + napi_synchronize(&ar->napi); napi_disable(&ar->napi); ath10k_snoc_buffer_cleanup(ar); @@ -927,10 +929,14 @@ static void ath10k_snoc_hif_stop(struct ath10k *ar) static int ath10k_snoc_hif_start(struct ath10k *ar) { + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); + napi_enable(&ar->napi); ath10k_snoc_irq_enable(ar); ath10k_snoc_rx_post(ar); + clear_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags); + ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n"); return 0; @@ -994,7 +1000,8 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar) static void ath10k_snoc_wlan_disable(struct ath10k *ar) { - ath10k_qmi_wlan_disable(ar); + if 
(!test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) + ath10k_qmi_wlan_disable(ar); } static void ath10k_snoc_hif_power_down(struct ath10k *ar) @@ -1091,6 +1098,11 @@ static int ath10k_snoc_napi_poll(struct napi_struct *ctx, int budget) struct ath10k *ar = container_of(ctx, struct ath10k, napi); int done = 0; + if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags)) { + napi_complete(ctx); + return done; + } + ath10k_ce_per_engine_service_any(ar); done = ath10k_htt_txrx_compl_task(ar, budget); @@ -1187,17 +1199,29 @@ int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type) struct ath10k_bus_params bus_params; int ret; + if (test_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags)) + return 0; + switch (type) { case ATH10K_QMI_EVENT_FW_READY_IND: + if (test_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags)) { + queue_work(ar->workqueue, &ar->restart_work); + break; + } + bus_params.dev_type = ATH10K_DEV_TYPE_LL; bus_params.chip_id = ar_snoc->target_info.soc_version; ret = ath10k_core_register(ar, &bus_params); if (ret) { - ath10k_err(ar, "failed to register driver core: %d\n", + ath10k_err(ar, "Failed to register driver core: %d\n", ret); + return ret; } + set_bit(ATH10K_SNOC_FLAG_REGISTERED, &ar_snoc->flags); break; case ATH10K_QMI_EVENT_FW_DOWN_IND: + set_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags); + set_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags); break; default: ath10k_err(ar, "invalid fw indication: %llx\n", type); @@ -1246,7 +1270,7 @@ static void ath10k_snoc_release_resource(struct ath10k *ar) } static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev, - struct ath10k_wcn3990_vreg_info *vreg_info) + struct ath10k_vreg_info *vreg_info) { struct regulator *reg; int ret = 0; @@ -1284,7 +1308,7 @@ done: } static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev, - struct ath10k_wcn3990_clk_info *clk_info) + struct ath10k_clk_info *clk_info) { struct clk *handle; int ret = 0; @@ -1311,10 +1335,80 @@ static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev, return ret; } -static int ath10k_wcn3990_vreg_on(struct ath10k *ar) +static int __ath10k_snoc_vreg_on(struct ath10k *ar, + struct ath10k_vreg_info *vreg_info) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n", + vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v, + vreg_info->max_v); + if (ret) { + ath10k_err(ar, + "failed to set regulator %s voltage-min: %d voltage-max: %d\n", + vreg_info->name, vreg_info->min_v, vreg_info->max_v); + return ret; + } + + if (vreg_info->load_ua) { + ret = regulator_set_load(vreg_info->reg, vreg_info->load_ua); + if (ret < 0) { + ath10k_err(ar, "failed to set regulator %s load: %d\n", + vreg_info->name, vreg_info->load_ua); + goto err_set_load; + } + } + + ret = regulator_enable(vreg_info->reg); + if (ret) { + ath10k_err(ar, "failed to enable regulator %s\n", + vreg_info->name); + goto err_enable; + } + + if (vreg_info->settle_delay) + udelay(vreg_info->settle_delay); + + return 0; + +err_enable: + regulator_set_load(vreg_info->reg, 0); +err_set_load: + regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + + return ret; +} + +static int __ath10k_snoc_vreg_off(struct ath10k *ar, + struct ath10k_vreg_info *vreg_info) +{ + int ret; + + ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n", + vreg_info->name); + + ret = regulator_disable(vreg_info->reg); + if (ret) + ath10k_err(ar, "failed to disable regulator %s\n", + vreg_info->name); + + ret = 
regulator_set_load(vreg_info->reg, 0); + if (ret < 0) + ath10k_err(ar, "failed to set load %s\n", vreg_info->name); + + ret = regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + if (ret) + ath10k_err(ar, "failed to set voltage %s\n", vreg_info->name); + + return ret; +} + +static int ath10k_snoc_vreg_on(struct ath10k *ar) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - struct ath10k_wcn3990_vreg_info *vreg_info; + struct ath10k_vreg_info *vreg_info; int ret = 0; int i; @@ -1324,62 +1418,30 @@ static int ath10k_wcn3990_vreg_on(struct ath10k *ar) if (!vreg_info->reg) continue; - ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being enabled\n", - vreg_info->name); - - ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v, - vreg_info->max_v); - if (ret) { - ath10k_err(ar, - "failed to set regulator %s voltage-min: %d voltage-max: %d\n", - vreg_info->name, vreg_info->min_v, vreg_info->max_v); - goto err_reg_config; - } - - if (vreg_info->load_ua) { - ret = regulator_set_load(vreg_info->reg, - vreg_info->load_ua); - if (ret < 0) { - ath10k_err(ar, - "failed to set regulator %s load: %d\n", - vreg_info->name, - vreg_info->load_ua); - goto err_reg_config; - } - } - - ret = regulator_enable(vreg_info->reg); - if (ret) { - ath10k_err(ar, "failed to enable regulator %s\n", - vreg_info->name); + ret = __ath10k_snoc_vreg_on(ar, vreg_info); + if (ret) goto err_reg_config; - } - - if (vreg_info->settle_delay) - udelay(vreg_info->settle_delay); } return 0; err_reg_config: - for (; i >= 0; i--) { + for (i = i - 1; i >= 0; i--) { vreg_info = &ar_snoc->vreg[i]; if (!vreg_info->reg) continue; - regulator_disable(vreg_info->reg); - regulator_set_load(vreg_info->reg, 0); - regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v); + __ath10k_snoc_vreg_off(ar, vreg_info); } return ret; } -static int ath10k_wcn3990_vreg_off(struct ath10k *ar) +static int ath10k_snoc_vreg_off(struct ath10k *ar) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - struct ath10k_wcn3990_vreg_info *vreg_info; + struct ath10k_vreg_info *vreg_info; int ret = 0; int i; @@ -1389,33 +1451,16 @@ static int ath10k_wcn3990_vreg_off(struct ath10k *ar) if (!vreg_info->reg) continue; - ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc regulator %s being disabled\n", - vreg_info->name); - - ret = regulator_disable(vreg_info->reg); - if (ret) - ath10k_err(ar, "failed to disable regulator %s\n", - vreg_info->name); - - ret = regulator_set_load(vreg_info->reg, 0); - if (ret < 0) - ath10k_err(ar, "failed to set load %s\n", - vreg_info->name); - - ret = regulator_set_voltage(vreg_info->reg, 0, - vreg_info->max_v); - if (ret) - ath10k_err(ar, "failed to set voltage %s\n", - vreg_info->name); + ret = __ath10k_snoc_vreg_off(ar, vreg_info); } return ret; } -static int ath10k_wcn3990_clk_init(struct ath10k *ar) +static int ath10k_snoc_clk_init(struct ath10k *ar) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - struct ath10k_wcn3990_clk_info *clk_info; + struct ath10k_clk_info *clk_info; int ret = 0; int i; @@ -1449,7 +1494,7 @@ static int ath10k_wcn3990_clk_init(struct ath10k *ar) return 0; err_clock_config: - for (; i >= 0; i--) { + for (i = i - 1; i >= 0; i--) { clk_info = &ar_snoc->clk[i]; if (!clk_info->handle) @@ -1461,10 +1506,10 @@ err_clock_config: return ret; } -static int ath10k_wcn3990_clk_deinit(struct ath10k *ar) +static int ath10k_snoc_clk_deinit(struct ath10k *ar) { struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); - struct ath10k_wcn3990_clk_info *clk_info; + struct ath10k_clk_info *clk_info; int i; for (i = 
0; i < ARRAY_SIZE(clk_cfg); i++) { @@ -1488,18 +1533,18 @@ static int ath10k_hw_power_on(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power on\n"); - ret = ath10k_wcn3990_vreg_on(ar); + ret = ath10k_snoc_vreg_on(ar); if (ret) return ret; - ret = ath10k_wcn3990_clk_init(ar); + ret = ath10k_snoc_clk_init(ar); if (ret) goto vreg_off; return ret; vreg_off: - ath10k_wcn3990_vreg_off(ar); + ath10k_snoc_vreg_off(ar); return ret; } @@ -1509,9 +1554,9 @@ static int ath10k_hw_power_off(struct ath10k *ar) ath10k_dbg(ar, ATH10K_DBG_SNOC, "soc power off\n"); - ath10k_wcn3990_clk_deinit(ar); + ath10k_snoc_clk_deinit(ar); - ret = ath10k_wcn3990_vreg_off(ar); + ret = ath10k_snoc_vreg_off(ar); return ret; } @@ -1609,7 +1654,6 @@ static int ath10k_snoc_probe(struct platform_device *pdev) } ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc probe\n"); - ath10k_warn(ar, "Warning: SNOC support is still work-in-progress, it will not work properly!"); return 0; @@ -1628,8 +1672,17 @@ err_core_destroy: static int ath10k_snoc_remove(struct platform_device *pdev) { struct ath10k *ar = platform_get_drvdata(pdev); + struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar); ath10k_dbg(ar, ATH10K_DBG_SNOC, "snoc remove\n"); + + reinit_completion(&ar->driver_recovery); + + if (test_bit(ATH10K_SNOC_FLAG_RECOVERY, &ar_snoc->flags)) + wait_for_completion_timeout(&ar->driver_recovery, 3 * HZ); + + set_bit(ATH10K_SNOC_FLAG_UNREGISTERING, &ar_snoc->flags); + ath10k_core_unregister(ar); ath10k_hw_power_off(ar); ath10k_snoc_free_irq(ar); @@ -1641,12 +1694,12 @@ static int ath10k_snoc_remove(struct platform_device *pdev) } static struct platform_driver ath10k_snoc_driver = { - .probe = ath10k_snoc_probe, - .remove = ath10k_snoc_remove, - .driver = { - .name = "ath10k_snoc", - .of_match_table = ath10k_snoc_dt_match, - }, + .probe = ath10k_snoc_probe, + .remove = ath10k_snoc_remove, + .driver = { + .name = "ath10k_snoc", + .of_match_table = ath10k_snoc_dt_match, + }, }; module_platform_driver(ath10k_snoc_driver); diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h index e1d2d6675556..2b2f23cf7c5d 100644 --- a/drivers/net/wireless/ath/ath10k/snoc.h +++ b/drivers/net/wireless/ath/ath10k/snoc.h @@ -53,7 +53,7 @@ struct ath10k_snoc_ce_irq { u32 irq_line; }; -struct ath10k_wcn3990_vreg_info { +struct ath10k_vreg_info { struct regulator *reg; const char *name; u32 min_v; @@ -63,13 +63,19 @@ struct ath10k_wcn3990_vreg_info { bool required; }; -struct ath10k_wcn3990_clk_info { +struct ath10k_clk_info { struct clk *handle; const char *name; u32 freq; bool required; }; +enum ath10k_snoc_flags { + ATH10K_SNOC_FLAG_REGISTERED, + ATH10K_SNOC_FLAG_UNREGISTERING, + ATH10K_SNOC_FLAG_RECOVERY, +}; + struct ath10k_snoc { struct platform_device *dev; struct ath10k *ar; @@ -81,9 +87,10 @@ struct ath10k_snoc { struct ath10k_snoc_ce_irq ce_irqs[CE_COUNT_MAX]; struct ath10k_ce ce; struct timer_list rx_post_retry; - struct ath10k_wcn3990_vreg_info *vreg; - struct ath10k_wcn3990_clk_info *clk; + struct ath10k_vreg_info *vreg; + struct ath10k_clk_info *clk; struct ath10k_qmi *qmi; + unsigned long int flags; }; static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) @@ -91,8 +98,6 @@ static inline struct ath10k_snoc *ath10k_snoc_priv(struct ath10k *ar) return (struct ath10k_snoc *)ar->drv_priv; } -void ath10k_snoc_write32(struct ath10k *ar, u32 offset, u32 value); -u32 ath10k_snoc_read32(struct ath10k *ar, u32 offset); int ath10k_snoc_fw_indication(struct ath10k *ar, u64 type); #endif /* _SNOC_H_ */ diff --git 
a/drivers/net/wireless/ath/ath10k/wmi-ops.h b/drivers/net/wireless/ath/ath10k/wmi-ops.h index 7978a7783f90..04663076d27a 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-ops.h +++ b/drivers/net/wireless/ath/ath10k/wmi-ops.h @@ -219,6 +219,9 @@ struct wmi_ops { struct sk_buff *(*gen_echo)(struct ath10k *ar, u32 value); struct sk_buff *(*gen_pdev_get_tpc_table_cmdid)(struct ath10k *ar, u32 param); + struct sk_buff *(*gen_bb_timing) + (struct ath10k *ar, + const struct wmi_bb_timing_cfg_arg *arg); }; @@ -1576,4 +1579,21 @@ ath10k_wmi_report_radar_found(struct ath10k *ar, ar->wmi.cmd->radar_found_cmdid); } +static inline int +ath10k_wmi_pdev_bb_timing(struct ath10k *ar, + const struct wmi_bb_timing_cfg_arg *arg) +{ + struct sk_buff *skb; + + if (!ar->wmi.ops->gen_bb_timing) + return -EOPNOTSUPP; + + skb = ar->wmi.ops->gen_bb_timing(ar, arg); + + if (IS_ERR(skb)) + return PTR_ERR(skb); + + return ath10k_wmi_cmd_send(ar, skb, + ar->wmi.cmd->set_bb_timing_cmdid); +} #endif diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c index bab8b2527fb8..892bd8c30dd9 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c @@ -621,7 +621,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb) ath10k_wmi_event_mgmt_tx_compl(ar, skb); break; default: - ath10k_warn(ar, "Unknown eventid: %d\n", id); + ath10k_dbg(ar, ATH10K_DBG_WMI, "Unknown eventid: %d\n", id); break; } @@ -762,6 +762,9 @@ static int ath10k_wmi_tlv_op_pull_ch_info_ev(struct ath10k *ar, arg->noise_floor = ev->noise_floor; arg->rx_clear_count = ev->rx_clear_count; arg->cycle_count = ev->cycle_count; + if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL, + ar->running_fw->fw_file.fw_features)) + arg->mac_clk_mhz = ev->mac_clk_mhz; kfree(tb); return 0; @@ -3452,7 +3455,6 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar, struct wmi_tlv *tlv; struct sk_buff *skb; __le32 *channel_list; - u16 tlv_len; size_t len; void *ptr; u32 i; @@ -3510,8 +3512,6 @@ ath10k_wmi_tlv_op_gen_config_pno_start(struct ath10k *ar, /* nlo_configured_parameters(nlo_list) */ cmd->no_of_ssids = __cpu_to_le32(min_t(u8, pno->uc_networks_count, WMI_NLO_MAX_SSIDS)); - tlv_len = __le32_to_cpu(cmd->no_of_ssids) * - sizeof(struct nlo_configured_parameters); tlv = ptr; tlv->tag = __cpu_to_le16(WMI_TLV_TAG_ARRAY_STRUCT); diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.h b/drivers/net/wireless/ath/ath10k/wmi-tlv.h index c2cb413392ee..e07e9907e355 100644 --- a/drivers/net/wireless/ath/ath10k/wmi-tlv.h +++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.h @@ -1582,6 +1582,16 @@ struct ath10k_mgmt_tx_pkt_addr { dma_addr_t paddr; }; +struct chan_info_params { + u32 err_code; + u32 freq; + u32 cmd_flags; + u32 noise_floor; + u32 rx_clear_count; + u32 cycle_count; + u32 mac_clk_mhz; +}; + struct wmi_tlv_mgmt_tx_compl_ev { __le32 desc_id; __le32 status; diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c index 25e8fa789e8d..ba837403e266 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.c +++ b/drivers/net/wireless/ath/ath10k/wmi.c @@ -539,6 +539,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = { WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, .pdev_get_tpc_table_cmdid = WMI_CMD_UNSUPPORTED, .radar_found_cmdid = WMI_CMD_UNSUPPORTED, + .set_bb_timing_cmdid = WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID, }; /* 10.4 WMI cmd track */ @@ -825,6 +826,7 @@ static struct wmi_vdev_param_map wmi_vdev_param_map = { .meru_vc = 
WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, }; /* 10.X WMI VDEV param map */ @@ -900,6 +902,7 @@ static struct wmi_vdev_param_map wmi_10x_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { @@ -974,6 +977,7 @@ static struct wmi_vdev_param_map wmi_10_2_4_vdev_param_map = { .meru_vc = WMI_VDEV_PARAM_UNSUPPORTED, .rx_decap_type = WMI_VDEV_PARAM_UNSUPPORTED, .bw_nss_ratemask = WMI_VDEV_PARAM_UNSUPPORTED, + .disable_4addr_src_lrn = WMI_VDEV_PARAM_UNSUPPORTED, }; static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { @@ -1051,6 +1055,7 @@ static struct wmi_vdev_param_map wmi_10_4_vdev_param_map = { .bw_nss_ratemask = WMI_10_4_VDEV_PARAM_BW_NSS_RATEMASK, .inc_tsf = WMI_10_4_VDEV_PARAM_TSF_INCREMENT, .dec_tsf = WMI_10_4_VDEV_PARAM_TSF_DECREMENT, + .disable_4addr_src_lrn = WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN, }; static struct wmi_pdev_param_map wmi_pdev_param_map = { @@ -2554,60 +2559,69 @@ static int ath10k_wmi_10_4_op_pull_ch_info_ev(struct ath10k *ar, return 0; } -void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) +/* + * Handle the channel info event for firmware which only sends one + * chan_info event per scanned channel. + */ +static void ath10k_wmi_event_chan_info_unpaired(struct ath10k *ar, + struct chan_info_params *params) { - struct wmi_ch_info_ev_arg arg = {}; struct survey_info *survey; - u32 err_code, freq, cmd_flags, noise_floor, rx_clear_count, cycle_count; - int idx, ret; + int idx; - ret = ath10k_wmi_pull_ch_info(ar, skb, &arg); - if (ret) { - ath10k_warn(ar, "failed to parse chan info event: %d\n", ret); + if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { + ath10k_dbg(ar, ATH10K_DBG_WMI, "chan info report completed\n"); return; } - err_code = __le32_to_cpu(arg.err_code); - freq = __le32_to_cpu(arg.freq); - cmd_flags = __le32_to_cpu(arg.cmd_flags); - noise_floor = __le32_to_cpu(arg.noise_floor); - rx_clear_count = __le32_to_cpu(arg.rx_clear_count); - cycle_count = __le32_to_cpu(arg.cycle_count); + idx = freq_to_idx(ar, params->freq); + if (idx >= ARRAY_SIZE(ar->survey)) { + ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", + params->freq, idx); + return; + } - ath10k_dbg(ar, ATH10K_DBG_WMI, - "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", - err_code, freq, cmd_flags, noise_floor, rx_clear_count, - cycle_count); + survey = &ar->survey[idx]; - spin_lock_bh(&ar->data_lock); + if (!params->mac_clk_mhz) + return; - switch (ar->scan.state) { - case ATH10K_SCAN_IDLE: - case ATH10K_SCAN_STARTING: - ath10k_warn(ar, "received chan info event without a scan request, ignoring\n"); - goto exit; - case ATH10K_SCAN_RUNNING: - case ATH10K_SCAN_ABORTING: - break; - } + memset(survey, 0, sizeof(*survey)); - idx = freq_to_idx(ar, freq); + survey->noise = params->noise_floor; + survey->time = (params->cycle_count / params->mac_clk_mhz) / 1000; + survey->time_busy = (params->rx_clear_count / params->mac_clk_mhz) / 1000; + survey->filled |= SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | + SURVEY_INFO_TIME_BUSY; +} + +/* + * Handle the channel info event for firmware which sends chan_info + * events in pairs (start and stop events) for every scanned
channel. + */ +static void ath10k_wmi_event_chan_info_paired(struct ath10k *ar, + struct chan_info_params *params) +{ + struct survey_info *survey; + int idx; + + idx = freq_to_idx(ar, params->freq); if (idx >= ARRAY_SIZE(ar->survey)) { ath10k_warn(ar, "chan info: invalid frequency %d (idx %d out of bounds)\n", - freq, idx); - goto exit; + params->freq, idx); + return; } - if (cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { + if (params->cmd_flags & WMI_CHAN_INFO_FLAG_COMPLETE) { if (ar->ch_info_can_report_survey) { survey = &ar->survey[idx]; - survey->noise = noise_floor; + survey->noise = params->noise_floor; survey->filled = SURVEY_INFO_NOISE_DBM; ath10k_hw_fill_survey_time(ar, survey, - cycle_count, - rx_clear_count, + params->cycle_count, + params->rx_clear_count, ar->survey_last_cycle_count, ar->survey_last_rx_clear_count); } @@ -2617,10 +2631,55 @@ void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) ar->ch_info_can_report_survey = true; } - if (!(cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) { - ar->survey_last_rx_clear_count = rx_clear_count; - ar->survey_last_cycle_count = cycle_count; + if (!(params->cmd_flags & WMI_CHAN_INFO_FLAG_PRE_COMPLETE)) { + ar->survey_last_rx_clear_count = params->rx_clear_count; + ar->survey_last_cycle_count = params->cycle_count; } +} + +void ath10k_wmi_event_chan_info(struct ath10k *ar, struct sk_buff *skb) +{ + struct chan_info_params ch_info_param; + struct wmi_ch_info_ev_arg arg = {}; + int ret; + + ret = ath10k_wmi_pull_ch_info(ar, skb, &arg); + if (ret) { + ath10k_warn(ar, "failed to parse chan info event: %d\n", ret); + return; + } + + ch_info_param.err_code = __le32_to_cpu(arg.err_code); + ch_info_param.freq = __le32_to_cpu(arg.freq); + ch_info_param.cmd_flags = __le32_to_cpu(arg.cmd_flags); + ch_info_param.noise_floor = __le32_to_cpu(arg.noise_floor); + ch_info_param.rx_clear_count = __le32_to_cpu(arg.rx_clear_count); + ch_info_param.cycle_count = __le32_to_cpu(arg.cycle_count); + ch_info_param.mac_clk_mhz = __le32_to_cpu(arg.mac_clk_mhz); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "chan info err_code %d freq %d cmd_flags %d noise_floor %d rx_clear_count %d cycle_count %d\n", + ch_info_param.err_code, ch_info_param.freq, ch_info_param.cmd_flags, + ch_info_param.noise_floor, ch_info_param.rx_clear_count, + ch_info_param.cycle_count); + + spin_lock_bh(&ar->data_lock); + + switch (ar->scan.state) { + case ATH10K_SCAN_IDLE: + case ATH10K_SCAN_STARTING: + ath10k_warn(ar, "received chan info event without a scan request, ignoring\n"); + goto exit; + case ATH10K_SCAN_RUNNING: + case ATH10K_SCAN_ABORTING: + break; + } + + if (test_bit(ATH10K_FW_FEATURE_SINGLE_CHAN_INFO_PER_CHANNEL, + ar->running_fw->fw_file.fw_features)) + ath10k_wmi_event_chan_info_unpaired(ar, &ch_info_param); + else + ath10k_wmi_event_chan_info_paired(ar, &ch_info_param); exit: spin_unlock_bh(&ar->data_lock); @@ -8785,6 +8844,27 @@ ath10k_wmi_barrier(struct ath10k *ar) return 0; } +static struct sk_buff * +ath10k_wmi_10_2_4_op_gen_bb_timing(struct ath10k *ar, + const struct wmi_bb_timing_cfg_arg *arg) +{ + struct wmi_pdev_bb_timing_cfg_cmd *cmd; + struct sk_buff *skb; + + skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd)); + if (!skb) + return ERR_PTR(-ENOMEM); + + cmd = (struct wmi_pdev_bb_timing_cfg_cmd *)skb->data; + cmd->bb_tx_timing = __cpu_to_le32(arg->bb_tx_timing); + cmd->bb_xpa_timing = __cpu_to_le32(arg->bb_xpa_timing); + + ath10k_dbg(ar, ATH10K_DBG_WMI, + "wmi pdev bb_tx_timing 0x%x bb_xpa_timing 0x%x\n", + arg->bb_tx_timing, arg->bb_xpa_timing); + return skb; +} + 
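For reference, the command built by ath10k_wmi_10_2_4_op_gen_bb_timing above is just two little-endian 32-bit words. The following stand-alone sketch is illustrative only and is not part of the patch; the put_le32 helper is a hypothetical stand-in for __cpu_to_le32, and the sample values match the microsemi-lx5586 branch earlier in this series.

#include <stdint.h>
#include <stdio.h>

/* Write a 32-bit value in little-endian byte order, mirroring what
 * __cpu_to_le32 guarantees for the __le32 fields of
 * wmi_pdev_bb_timing_cfg_cmd regardless of host endianness.
 */
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[8];
	int i;

	put_le32(buf + 0, 0x00);   /* bb_tx_timing: tx_end to PA off */
	put_le32(buf + 4, 0x0101); /* bb_xpa_timing: tx_end to external PA off */

	for (i = 0; i < 8; i++)
		printf("%02x ", buf[i]);
	printf("\n"); /* prints: 00 00 00 00 01 01 00 00 */
	return 0;
}

The real driver path instead allocates the buffer with ath10k_wmi_alloc_skb and sends it via set_bb_timing_cmdid, as shown in the hunks above.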
static const struct wmi_ops wmi_ops = { .rx = ath10k_wmi_op_rx, .map_svc = wmi_main_svc_map, @@ -9058,6 +9138,7 @@ static const struct wmi_ops wmi_10_2_4_ops = { .gen_pdev_enable_adaptive_cca = ath10k_wmi_op_gen_pdev_enable_adaptive_cca, .get_vdev_subtype = ath10k_wmi_10_2_4_op_get_vdev_subtype, + .gen_bb_timing = ath10k_wmi_10_2_4_op_gen_bb_timing, /* .gen_bcn_tmpl not implemented */ /* .gen_prb_tmpl not implemented */ /* .gen_p2p_go_bcn_ie not implemented */ diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h index c5a343c93013..2034ccc7cc72 100644 --- a/drivers/net/wireless/ath/ath10k/wmi.h +++ b/drivers/net/wireless/ath/ath10k/wmi.h @@ -205,6 +205,8 @@ enum wmi_service { WMI_SERVICE_SPOOF_MAC_SUPPORT, WMI_SERVICE_TX_DATA_ACK_RSSI, WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, WMI_SERVICE_THERM_THROT, /* keep last */ @@ -245,6 +247,9 @@ enum wmi_10x_service { WMI_10X_SERVICE_PEER_STATS, WMI_10X_SERVICE_RESET_CHIP, WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, + WMI_10X_SERVICE_VDEV_BCN_RATE_CONTROL, + WMI_10X_SERVICE_PER_PACKET_SW_ENCRYPT, + WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT, }; enum wmi_main_service { @@ -360,6 +365,9 @@ enum wmi_10_4_service { WMI_10_4_SERVICE_PEER_TID_CONFIGS_SUPPORT, WMI_10_4_SERVICE_VDEV_BCN_RATE_CONTROL, WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, + WMI_10_4_SERVICE_HTT_ASSERT_TRIGGER_SUPPORT, + WMI_10_4_SERVICE_VDEV_FILTER_NEIGHBOR_RX_PACKETS, + WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, }; static inline char *wmi_service_name(int service_id) @@ -569,6 +577,8 @@ static inline void wmi_10x_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_RESET_CHIP, len); SVCMAP(WMI_10X_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, len); + SVCMAP(WMI_10X_SERVICE_BB_TIMING_CONFIG_SUPPORT, + WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, len); } static inline void wmi_main_svc_map(const __le32 *in, unsigned long *out, @@ -787,6 +797,8 @@ static inline void wmi_10_4_svc_map(const __le32 *in, unsigned long *out, WMI_SERVICE_TX_DATA_ACK_RSSI, len); SVCMAP(WMI_10_4_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT, len); + SVCMAP(WMI_10_4_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, + WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, len); } #undef SVCMAP @@ -987,6 +999,7 @@ struct wmi_cmd_map { u32 pdev_wds_entry_list_cmdid; u32 tdls_set_offchan_mode_cmdid; u32 radar_found_cmdid; + u32 set_bb_timing_cmdid; }; /* @@ -1602,6 +1615,8 @@ enum wmi_10_2_cmd_id { WMI_10_2_SET_LTEU_CONFIG_CMDID, WMI_10_2_SET_CCA_PARAMS, WMI_10_2_PDEV_BSS_CHAN_INFO_REQUEST_CMDID, + WMI_10_2_FWTEST_CMDID, + WMI_10_2_PDEV_SET_BB_TIMING_CONFIG_CMDID, WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1, }; @@ -4985,6 +5000,7 @@ enum wmi_rate_preamble { (((preamble) << 6) | ((nss) << 4) | (rate)) #define ATH10K_HW_AMPDU(flags) ((flags) & 0x1) #define ATH10K_HW_BA_FAIL(flags) (((flags) >> 1) & 0x3) +#define ATH10K_FW_SKIPPED_RATE_CTRL(flags) (((flags) >> 6) & 0x1) #define ATH10K_VHT_MCS_NUM 10 #define ATH10K_BW_NUM 4 @@ -4992,6 +5008,7 @@ enum wmi_rate_preamble { #define ATH10K_LEGACY_NUM 12 #define ATH10K_GI_NUM 2 #define ATH10K_HT_MCS_NUM 32 +#define ATH10K_RATE_TABLE_NUM 320 /* Value to disable fixed rate setting */ #define WMI_FIXED_RATE_NONE (0xff) @@ -5065,6 +5082,7 @@ struct wmi_vdev_param_map { u32 bw_nss_ratemask; u32 inc_tsf; u32 dec_tsf; + u32 
disable_4addr_src_lrn; }; #define WMI_VDEV_PARAM_UNSUPPORTED 0 @@ -5404,8 +5422,20 @@ enum wmi_10_4_vdev_param { WMI_10_4_VDEV_PARAM_ATF_SSID_SCHED_POLICY, WMI_10_4_VDEV_PARAM_DISABLE_DYN_BW_RTS, WMI_10_4_VDEV_PARAM_TSF_DECREMENT, + WMI_10_4_VDEV_PARAM_SELFGEN_FIXED_RATE, + WMI_10_4_VDEV_PARAM_AMPDU_SUBFRAME_SIZE_PER_AC, + WMI_10_4_VDEV_PARAM_NSS_VHT160, + WMI_10_4_VDEV_PARAM_NSS_VHT80_80, + WMI_10_4_VDEV_PARAM_AMSDU_SUBFRAME_SIZE_PER_AC, + WMI_10_4_VDEV_PARAM_DISABLE_CABQ, + WMI_10_4_VDEV_PARAM_SIFS_TRIGGER_RATE, + WMI_10_4_VDEV_PARAM_TX_POWER, + WMI_10_4_VDEV_PARAM_ENABLE_DISABLE_RTT_RESPONDER_ROLE, + WMI_10_4_VDEV_PARAM_DISABLE_4_ADDR_SRC_LRN, }; +#define WMI_VDEV_DISABLE_4_ADDR_SRC_LRN 1 + #define WMI_VDEV_PARAM_TXBF_SU_TX_BFEE BIT(0) #define WMI_VDEV_PARAM_TXBF_MU_TX_BFEE BIT(1) #define WMI_VDEV_PARAM_TXBF_SU_TX_BFER BIT(2) @@ -6442,6 +6472,14 @@ struct wmi_chan_info_event { __le32 noise_floor; __le32 rx_clear_count; __le32 cycle_count; + __le32 chan_tx_pwr_range; + __le32 chan_tx_pwr_tp; + __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; + } __packed; struct wmi_10_4_chan_info_event { @@ -6670,6 +6708,10 @@ struct wmi_ch_info_ev_arg { __le32 chan_tx_pwr_range; __le32 chan_tx_pwr_tp; __le32 rx_frame_count; + __le32 my_bss_rx_cycle_count; + __le32 rx_11b_mode_data_duration; + __le32 tx_frame_cnt; + __le32 mac_clk_mhz; }; /* From 10.4 firmware, not sure all have the same values. */ @@ -7141,6 +7183,23 @@ struct wmi_pdev_chan_info_req_cmd { __le32 reserved; } __packed; +/* bb timing register configurations */ +struct wmi_bb_timing_cfg_arg { + /* Tx_end to pa off timing */ + u32 bb_tx_timing; + + /* Tx_end to external pa off timing */ + u32 bb_xpa_timing; +}; + +struct wmi_pdev_bb_timing_cfg_cmd { + /* Tx_end to pa off timing */ + __le32 bb_tx_timing; + + /* Tx_end to external pa off timing */ + __le32 bb_xpa_timing; +} __packed; + struct ath10k; struct ath10k_vif; struct ath10k_fw_stats_pdev; diff --git a/drivers/net/wireless/ath/ath10k/wow.c b/drivers/net/wireless/ath/ath10k/wow.c index 51b26b305885..36d4245c308e 100644 --- a/drivers/net/wireless/ath/ath10k/wow.c +++ b/drivers/net/wireless/ath/ath10k/wow.c @@ -135,7 +135,7 @@ static void ath10k_wow_convert_8023_to_80211 &old_hdr_mask->h_proto, sizeof(old_hdr_mask->h_proto)); - /* Caculate new pkt_offset */ + /* Calculate new pkt_offset */ if (old->pkt_offset < ETH_ALEN) new->pkt_offset = old->pkt_offset + offsetof(struct ieee80211_hdr_3addr, addr1); @@ -146,7 +146,7 @@ static void ath10k_wow_convert_8023_to_80211 else new->pkt_offset = old->pkt_offset + hdr_len + rfc_len - ETH_HLEN; - /* Caculate new hdr end offset */ + /* Calculate new hdr end offset */ if (total_len > ETH_HLEN) hdr_80211_end_offset = hdr_len + rfc_len; else if (total_len > offsetof(struct ethhdr, h_proto)) diff --git a/drivers/net/wireless/ath/ath6kl/cfg80211.c b/drivers/net/wireless/ath/ath6kl/cfg80211.c index e121187f371f..59dd50866932 100644 --- a/drivers/net/wireless/ath/ath6kl/cfg80211.c +++ b/drivers/net/wireless/ath/ath6kl/cfg80211.c @@ -291,7 +291,7 @@ static bool ath6kl_cfg80211_ready(struct ath6kl_vif *vif) } if (!test_bit(WLAN_ENABLED, &vif->flags)) { - ath6kl_err("wlan disabled\n"); + ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "wlan disabled\n"); return false; } @@ -939,7 +939,7 @@ static int ath6kl_set_probed_ssids(struct ath6kl *ar, else ssid_list[i].flag = ANY_SSID_FLAG; - if (n_match_ssid == 0) + if (ar->wiphy->max_match_sets != 0 && n_match_ssid == 0) ssid_list[i].flag |= 
MATCH_SSID_FLAG; } @@ -1093,7 +1093,7 @@ void ath6kl_cfg80211_scan_complete_event(struct ath6kl_vif *vif, bool aborted) if (vif->scan_req->n_ssids && vif->scan_req->ssids[0].ssid_len) { for (i = 0; i < vif->scan_req->n_ssids; i++) { ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx, - i + 1, DISABLE_SSID_FLAG, + i, DISABLE_SSID_FLAG, 0, NULL); } } diff --git a/drivers/net/wireless/ath/ath6kl/main.c b/drivers/net/wireless/ath/ath6kl/main.c index cb59016c723b..5e7ea838a921 100644 --- a/drivers/net/wireless/ath/ath6kl/main.c +++ b/drivers/net/wireless/ath/ath6kl/main.c @@ -389,6 +389,7 @@ void ath6kl_connect_ap_mode_bss(struct ath6kl_vif *vif, u16 channel) if (!ik->valid || ik->key_type != WAPI_CRYPT) break; /* for WAPI, we need to set the delayed group key, continue: */ + /* fall through */ case WPA_PSK_AUTH: case WPA2_PSK_AUTH: case (WPA_PSK_AUTH | WPA2_PSK_AUTH): diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig index 1f3523019509..ceca23a851d5 100644 --- a/drivers/net/wireless/ath/ath9k/Kconfig +++ b/drivers/net/wireless/ath/ath9k/Kconfig @@ -116,7 +116,7 @@ config ATH9K_DFS_CERTIFIED except increase code size. config ATH9K_DYNACK - bool "Atheros ath9k ACK timeout estimation algorithm (EXPERIMENTAL)" + bool "Atheros ath9k ACK timeout estimation algorithm" depends on ATH9K default n ---help--- diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c index 11d6f975c87d..dae95402eb3a 100644 --- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c @@ -586,7 +586,7 @@ static void ar5008_hw_init_chain_masks(struct ath_hw *ah) REG_WRITE(ah, AR_PHY_CAL_CHAINMASK, 0x7); break; } - /* else: fall through */ + /* fall through */ case 0x1: case 0x2: case 0x7: diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c index 713291881208..6f32b8d2ec7f 100644 --- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c @@ -119,7 +119,7 @@ static int ar9002_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) aModeRefSel = 2; if (aModeRefSel) break; - /* else: fall through */ + /* fall through */ case 1: default: aModeRefSel = 0; diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mci.c b/drivers/net/wireless/ath/ath9k/ar9003_mci.c index 0fe9c8378249..9899661f9a60 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_mci.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_mci.c @@ -1055,17 +1055,15 @@ void ar9003_mci_stop_bt(struct ath_hw *ah, bool save_fullsleep) static void ar9003_mci_send_2g5g_status(struct ath_hw *ah, bool wait_done) { struct ath9k_hw_mci *mci = &ah->btcoex_hw.mci; - u32 new_flags, to_set, to_clear; + u32 to_set, to_clear; if (!mci->update_2g5g || (mci->bt_state == MCI_BT_SLEEP)) return; if (mci->is_2g) { - new_flags = MCI_2G_FLAGS; to_clear = MCI_2G_FLAGS_CLEAR_MASK; to_set = MCI_2G_FLAGS_SET_MASK; } else { - new_flags = MCI_5G_FLAGS; to_clear = MCI_5G_FLAGS_CLEAR_MASK; to_set = MCI_5G_FLAGS_SET_MASK; } diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h index 21ba20981a80..0fca44e91a71 100644 --- a/drivers/net/wireless/ath/ath9k/ath9k.h +++ b/drivers/net/wireless/ath/ath9k/ath9k.h @@ -272,7 +272,7 @@ struct ath_node { #endif u8 key_idx[4]; - u32 ackto; + int ackto; struct list_head list; }; diff --git a/drivers/net/wireless/ath/ath9k/dynack.c b/drivers/net/wireless/ath/ath9k/dynack.c index 7334c9b09e82..f112fa5b2eac 100644 --- 
a/drivers/net/wireless/ath/ath9k/dynack.c +++ b/drivers/net/wireless/ath/ath9k/dynack.c @@ -29,9 +29,13 @@ * ath_dynack_ewma - EWMA (Exponentially Weighted Moving Average) calculation * */ -static inline u32 ath_dynack_ewma(u32 old, u32 new) +static inline int ath_dynack_ewma(int old, int new) { - return (new * (EWMA_DIV - EWMA_LEVEL) + old * EWMA_LEVEL) / EWMA_DIV; + if (old > 0) + return (new * (EWMA_DIV - EWMA_LEVEL) + + old * EWMA_LEVEL) / EWMA_DIV; + else + return new; } /** @@ -82,10 +86,10 @@ static inline bool ath_dynack_bssidmask(struct ath_hw *ah, const u8 *mac) */ static void ath_dynack_compute_ackto(struct ath_hw *ah) { - struct ath_node *an; - u32 to = 0; - struct ath_dynack *da = &ah->dynack; struct ath_common *common = ath9k_hw_common(ah); + struct ath_dynack *da = &ah->dynack; + struct ath_node *an; + int to = 0; list_for_each_entry(an, &da->nodes, list) if (an->ackto > to) @@ -144,7 +148,8 @@ static void ath_dynack_compute_to(struct ath_hw *ah) an->ackto = ath_dynack_ewma(an->ackto, ackto); ath_dbg(ath9k_hw_common(ah), DYNACK, - "%pM to %u\n", dst, an->ackto); + "%pM to %d [%u]\n", dst, + an->ackto, ackto); if (time_is_before_jiffies(da->lto)) { ath_dynack_compute_ackto(ah); da->lto = jiffies + COMPUTE_TO; @@ -166,18 +171,21 @@ static void ath_dynack_compute_to(struct ath_hw *ah) * @ah: ath hw * @skb: socket buffer * @ts: tx status info + * @sta: station pointer * */ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, - struct ath_tx_status *ts) + struct ath_tx_status *ts, + struct ieee80211_sta *sta) { - u8 ridx; struct ieee80211_hdr *hdr; struct ath_dynack *da = &ah->dynack; struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + u32 dur = ts->duration; + u8 ridx; - if ((info->flags & IEEE80211_TX_CTL_NO_ACK) || !da->enabled) + if (!da->enabled || (info->flags & IEEE80211_TX_CTL_NO_ACK)) return; spin_lock_bh(&da->qlock); @@ -187,11 +195,19 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, /* late ACK */ if (ts->ts_status & ATH9K_TXERR_XRETRY) { if (ieee80211_is_assoc_req(hdr->frame_control) || - ieee80211_is_assoc_resp(hdr->frame_control)) { + ieee80211_is_assoc_resp(hdr->frame_control) || + ieee80211_is_auth(hdr->frame_control)) { ath_dbg(common, DYNACK, "late ack\n"); + ath9k_hw_setslottime(ah, (LATEACK_TO - 3) / 2); ath9k_hw_set_ack_timeout(ah, LATEACK_TO); ath9k_hw_set_cts_timeout(ah, LATEACK_TO); + if (sta) { + struct ath_node *an; + + an = (struct ath_node *)sta->drv_priv; + an->ackto = -1; + } da->lto = jiffies + LATEACK_DELAY; } @@ -202,14 +218,13 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, ridx = ts->ts_rateindex; da->st_rbf.ts[da->st_rbf.t_rb].tstamp = ts->ts_tstamp; - da->st_rbf.ts[da->st_rbf.t_rb].dur = ts->duration; ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_dest, hdr->addr1); ether_addr_copy(da->st_rbf.addr[da->st_rbf.t_rb].h_src, hdr->addr2); if (!(info->status.rates[ridx].flags & IEEE80211_TX_RC_MCS)) { - u32 phy, sifs; const struct ieee80211_rate *rate; struct ieee80211_tx_rate *rates = info->status.rates; + u32 phy; rate = &common->sbands[info->band].bitrates[rates[ridx].idx]; if (info->band == NL80211_BAND_2GHZ && @@ -218,19 +233,18 @@ void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, else phy = WLAN_RC_PHY_OFDM; - sifs = ath_dynack_get_sifs(ah, phy); - da->st_rbf.ts[da->st_rbf.t_rb].dur -= sifs; + dur -= ath_dynack_get_sifs(ah, phy); } - - ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n", - 
hdr->addr1, da->st_rbf.ts[da->st_rbf.t_rb].tstamp, - da->st_rbf.ts[da->st_rbf.t_rb].dur, da->st_rbf.h_rb, - (da->st_rbf.t_rb + 1) % ATH_DYN_BUF); + da->st_rbf.ts[da->st_rbf.t_rb].dur = dur; INCR(da->st_rbf.t_rb, ATH_DYN_BUF); if (da->st_rbf.t_rb == da->st_rbf.h_rb) INCR(da->st_rbf.h_rb, ATH_DYN_BUF); + ath_dbg(common, DYNACK, "{%pM} tx sample %u [dur %u][h %u-t %u]\n", + hdr->addr1, ts->ts_tstamp, dur, da->st_rbf.h_rb, + da->st_rbf.t_rb); + ath_dynack_compute_to(ah); spin_unlock_bh(&da->qlock); @@ -251,20 +265,19 @@ void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, struct ath_common *common = ath9k_hw_common(ah); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - if (!ath_dynack_bssidmask(ah, hdr->addr1) || !da->enabled) + if (!da->enabled || !ath_dynack_bssidmask(ah, hdr->addr1)) return; spin_lock_bh(&da->qlock); da->ack_rbf.tstamp[da->ack_rbf.t_rb] = ts; - ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n", - da->ack_rbf.tstamp[da->ack_rbf.t_rb], - da->ack_rbf.h_rb, (da->ack_rbf.t_rb + 1) % ATH_DYN_BUF); - INCR(da->ack_rbf.t_rb, ATH_DYN_BUF); if (da->ack_rbf.t_rb == da->ack_rbf.h_rb) INCR(da->ack_rbf.h_rb, ATH_DYN_BUF); + ath_dbg(common, DYNACK, "rx sample %u [h %u-t %u]\n", + ts, da->ack_rbf.h_rb, da->ack_rbf.t_rb); + ath_dynack_compute_to(ah); spin_unlock_bh(&da->qlock); diff --git a/drivers/net/wireless/ath/ath9k/dynack.h b/drivers/net/wireless/ath/ath9k/dynack.h index 6d7bef976742..cf60224d40df 100644 --- a/drivers/net/wireless/ath/ath9k/dynack.h +++ b/drivers/net/wireless/ath/ath9k/dynack.h @@ -86,7 +86,8 @@ void ath_dynack_node_deinit(struct ath_hw *ah, struct ath_node *an); void ath_dynack_init(struct ath_hw *ah); void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts); void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, - struct ath_tx_status *ts); + struct ath_tx_status *ts, + struct ieee80211_sta *sta); #else static inline void ath_dynack_init(struct ath_hw *ah) {} static inline void ath_dynack_node_init(struct ath_hw *ah, @@ -97,7 +98,8 @@ static inline void ath_dynack_sample_ack_ts(struct ath_hw *ah, struct sk_buff *skb, u32 ts) {} static inline void ath_dynack_sample_tx_ts(struct ath_hw *ah, struct sk_buff *skb, - struct ath_tx_status *ts) {} + struct ath_tx_status *ts, + struct ieee80211_sta *sta) {} #endif #endif /* DYNACK_H */ diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c index bb319f22761f..8581d917635a 100644 --- a/drivers/net/wireless/ath/ath9k/hw.c +++ b/drivers/net/wireless/ath/ath9k/hw.c @@ -2279,6 +2279,7 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period) case NL80211_IFTYPE_ADHOC: REG_SET_BIT(ah, AR_TXCFG, AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY); + /* fall through */ case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon); diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 25b3fc82d4ac..f448d5716639 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -629,7 +629,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, if (bf == bf->bf_lastbf) ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, - ts); + ts, sta); } ath_tx_complete_buf(sc, bf, txq, &bf_head, sta, ts, @@ -773,7 +773,8 @@ static void ath_tx_process_buffer(struct ath_softc *sc, struct ath_txq *txq, memcpy(info->control.rates, bf->rates, sizeof(info->control.rates)); ath_tx_rc_status(sc, bf, ts, 1, txok 
? 0 : 1, txok); - ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts); + ath_dynack_sample_tx_ts(sc->sc_ah, bf->bf_mpdu, ts, + sta); } ath_tx_complete_buf(sc, bf, txq, bf_head, sta, ts, txok); } else diff --git a/drivers/net/wireless/ath/carl9170/rx.c b/drivers/net/wireless/ath/carl9170/rx.c index 705063259c8f..f7c2f19e81c1 100644 --- a/drivers/net/wireless/ath/carl9170/rx.c +++ b/drivers/net/wireless/ath/carl9170/rx.c @@ -766,6 +766,7 @@ static void carl9170_rx_untie_data(struct ar9170 *ar, u8 *buf, int len) goto drop; } + /* fall through */ case AR9170_RX_STATUS_MPDU_MIDDLE: /* These are just data + mac status */ diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c index 8c75651ede6c..2407931440ed 100644 --- a/drivers/net/wireless/ath/carl9170/tx.c +++ b/drivers/net/wireless/ath/carl9170/tx.c @@ -830,10 +830,12 @@ static bool carl9170_tx_rts_check(struct ar9170 *ar, case CARL9170_ERP_AUTO: if (ampdu) break; + /* fall through */ case CARL9170_ERP_MAC80211: if (!(rate->flags & IEEE80211_TX_RC_USE_RTS_CTS)) break; + /* fall through */ case CARL9170_ERP_RTS: if (likely(!multi)) @@ -854,6 +856,7 @@ static bool carl9170_tx_cts_check(struct ar9170 *ar, case CARL9170_ERP_MAC80211: if (!(rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)) break; + /* fall through */ case CARL9170_ERP_CTS: return true; diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c index d18e81fae5f1..9b2f9f543952 100644 --- a/drivers/net/wireless/ath/wil6210/cfg80211.c +++ b/drivers/net/wireless/ath/wil6210/cfg80211.c @@ -51,6 +51,19 @@ static struct ieee80211_channel wil_60ghz_channels[] = { CHAN60G(4, 0), }; +static void +wil_memdup_ie(u8 **pdst, size_t *pdst_len, const u8 *src, size_t src_len) +{ + kfree(*pdst); + *pdst = NULL; + *pdst_len = 0; + if (src_len > 0) { + *pdst = kmemdup(src, src_len, GFP_KERNEL); + if (*pdst) + *pdst_len = src_len; + } +} + static int wil_num_supported_channels(struct wil6210_priv *wil) { int num_channels = ARRAY_SIZE(wil_60ghz_channels); @@ -1441,11 +1454,19 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy, rc = wmi_add_cipher_key(vif, key_index, mac_addr, params->key_len, params->key, key_usage); - if (!rc && !IS_ERR(cs)) + if (!rc && !IS_ERR(cs)) { + /* update local storage used for AP recovery */ + if (key_usage == WMI_KEY_USE_TX_GROUP && params->key && + params->key_len <= WMI_MAX_KEY_LEN) { + vif->gtk_index = key_index; + memcpy(vif->gtk, params->key, params->key_len); + vif->gtk_len = params->key_len; + } /* in FT set crypto will take place upon receiving * WMI_RING_EN_EVENTID event */ wil_set_crypto_rx(key_index, key_usage, cs, params); + } return rc; } @@ -1634,6 +1655,14 @@ static int _wil_cfg80211_set_ies(struct wil6210_vif *vif, u16 len = 0, proberesp_len = 0; u8 *ies = NULL, *proberesp; + /* update local storage used for AP recovery */ + wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, bcon->probe_resp, + bcon->probe_resp_len); + wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len, + bcon->proberesp_ies, bcon->proberesp_ies_len); + wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, + bcon->assocresp_ies, bcon->assocresp_ies_len); + proberesp = _wil_cfg80211_get_proberesp_ies(bcon->probe_resp, bcon->probe_resp_len, &proberesp_len); @@ -1735,6 +1764,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy, vif->channel = chan; vif->hidden_ssid = hidden_ssid; vif->pbss = pbss; + vif->bi = bi; + memcpy(vif->ssid, ssid, ssid_len); + vif->ssid_len = ssid_len; 
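/*
 * wil_memdup_ie() above is an own-and-replace helper: it always frees the
 * previous copy and clears the stored length, then duplicates the new IEs
 * only if there is anything to duplicate. Passing src = NULL, src_len = 0
 * therefore doubles as the "free and reset" path, which is exactly how the
 * cached AP state is dropped again in wil_cfg80211_stop_ap() further down:
 *
 *   wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, NULL, 0);
 *
 * A failed kmemdup() is tolerated silently; recovery would then simply
 * restart the AP without the cached IEs.
 */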
netif_carrier_on(ndev); if (!wil_has_other_active_ifaces(wil, ndev, false, true)) @@ -1761,11 +1793,64 @@ out: return rc; } +void wil_cfg80211_ap_recovery(struct wil6210_priv *wil) +{ + int rc, i; + struct wiphy *wiphy = wil_to_wiphy(wil); + + for (i = 0; i < wil->max_vifs; i++) { + struct wil6210_vif *vif = wil->vifs[i]; + struct net_device *ndev; + struct cfg80211_beacon_data bcon = {}; + struct key_params key_params = {}; + + if (!vif || vif->ssid_len == 0) + continue; + + ndev = vif_to_ndev(vif); + bcon.proberesp_ies = vif->proberesp_ies; + bcon.assocresp_ies = vif->assocresp_ies; + bcon.probe_resp = vif->proberesp; + bcon.proberesp_ies_len = vif->proberesp_ies_len; + bcon.assocresp_ies_len = vif->assocresp_ies_len; + bcon.probe_resp_len = vif->proberesp_len; + + wil_info(wil, + "AP (vif %d) recovery: privacy %d, bi %d, channel %d, hidden %d, pbss %d\n", + i, vif->privacy, vif->bi, vif->channel, + vif->hidden_ssid, vif->pbss); + wil_hex_dump_misc("SSID ", DUMP_PREFIX_OFFSET, 16, 1, + vif->ssid, vif->ssid_len, true); + rc = _wil_cfg80211_start_ap(wiphy, ndev, + vif->ssid, vif->ssid_len, + vif->privacy, vif->bi, + vif->channel, &bcon, + vif->hidden_ssid, vif->pbss); + if (rc) { + wil_err(wil, "vif %d recovery failed (%d)\n", i, rc); + continue; + } + + if (!vif->privacy || vif->gtk_len == 0) + continue; + + key_params.key = vif->gtk; + key_params.key_len = vif->gtk_len; + key_params.seq_len = IEEE80211_GCMP_PN_LEN; + rc = wil_cfg80211_add_key(wiphy, ndev, vif->gtk_index, false, + NULL, &key_params); + if (rc) + wil_err(wil, "vif %d recovery add key failed (%d)\n", + i, rc); + } +} + static int wil_cfg80211_change_beacon(struct wiphy *wiphy, struct net_device *ndev, struct cfg80211_beacon_data *bcon) { struct wil6210_priv *wil = wiphy_to_wil(wiphy); + struct wireless_dev *wdev = ndev->ieee80211_ptr; struct wil6210_vif *vif = ndev_to_vif(ndev); int rc; u32 privacy = 0; @@ -1778,15 +1863,16 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy, bcon->tail_len)) privacy = 1; + memcpy(vif->ssid, wdev->ssid, wdev->ssid_len); + vif->ssid_len = wdev->ssid_len; + /* in case privacy has changed, need to restart the AP */ if (vif->privacy != privacy) { - struct wireless_dev *wdev = ndev->ieee80211_ptr; - wil_dbg_misc(wil, "privacy changed %d=>%d. 
Restarting AP\n", vif->privacy, privacy); - rc = _wil_cfg80211_start_ap(wiphy, ndev, wdev->ssid, - wdev->ssid_len, privacy, + rc = _wil_cfg80211_start_ap(wiphy, ndev, vif->ssid, + vif->ssid_len, privacy, wdev->beacon_interval, vif->channel, bcon, vif->hidden_ssid, @@ -1876,6 +1962,12 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy, wmi_pcp_stop(vif); clear_bit(wil_vif_ft_roam, vif->status); + vif->ssid_len = 0; + wil_memdup_ie(&vif->proberesp, &vif->proberesp_len, NULL, 0); + wil_memdup_ie(&vif->proberesp_ies, &vif->proberesp_ies_len, NULL, 0); + wil_memdup_ie(&vif->assocresp_ies, &vif->assocresp_ies_len, NULL, 0); + memset(vif->gtk, 0, WMI_MAX_KEY_LEN); + vif->gtk_len = 0; if (last) __wil_down(wil); @@ -1923,7 +2015,7 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy, params->mac, params->reason_code, vif->mid); mutex_lock(&wil->mutex); - wil6210_disconnect(vif, params->mac, params->reason_code, false); + wil6210_disconnect(vif, params->mac, params->reason_code); mutex_unlock(&wil->mutex); return 0; diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c index aa50813a0595..835c902b84c1 100644 --- a/drivers/net/wireless/ath/wil6210/debugfs.c +++ b/drivers/net/wireless/ath/wil6210/debugfs.c @@ -124,7 +124,7 @@ static void wil_print_ring(struct seq_file *s, struct wil6210_priv *wil, seq_puts(s, "}\n"); } -static int wil_ring_debugfs_show(struct seq_file *s, void *data) +static int ring_show(struct seq_file *s, void *data) { uint i; struct wil6210_priv *wil = s->private; @@ -183,18 +183,7 @@ static int wil_ring_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_ring_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_ring_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_ring = { - .open = wil_ring_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(ring); static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil, struct wil_status_ring *sring) @@ -240,7 +229,7 @@ static void wil_print_sring(struct seq_file *s, struct wil6210_priv *wil, seq_puts(s, "}\n"); } -static int wil_srings_debugfs_show(struct seq_file *s, void *data) +static int srings_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; int i = 0; @@ -251,18 +240,7 @@ static int wil_srings_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_srings_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_srings_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_srings = { - .open = wil_srings_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(srings); static void wil_seq_hexdump(struct seq_file *s, void *p, int len, const char *prefix) @@ -348,7 +326,7 @@ static void wil_print_mbox_ring(struct seq_file *s, const char *prefix, wil_halp_unvote(wil); } -static int wil_mbox_debugfs_show(struct seq_file *s, void *data) +static int mbox_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; int ret; @@ -366,18 +344,7 @@ static int wil_mbox_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_mbox_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_mbox_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_mbox = { - .open = wil_mbox_seq_open, - .release = 
single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(mbox); static int wil_debugfs_iomem_x32_set(void *data, u64 val) { @@ -624,7 +591,7 @@ static int wil6210_debugfs_create_ITR_CNT(struct wil6210_priv *wil, return 0; } -static int wil_memread_debugfs_show(struct seq_file *s, void *data) +static int memread_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; void __iomem *a; @@ -645,18 +612,7 @@ static int wil_memread_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_memread_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_memread_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_memread = { - .open = wil_memread_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(memread); static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) @@ -664,10 +620,10 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, enum { max_count = 4096 }; struct wil_blob_wrapper *wil_blob = file->private_data; struct wil6210_priv *wil = wil_blob->wil; - loff_t pos = *ppos; + loff_t aligned_pos, pos = *ppos; size_t available = wil_blob->blob.size; void *buf; - size_t ret; + size_t unaligned_bytes, aligned_count, ret; int rc; if (test_bit(wil_status_suspending, wil_blob->wil->status) || @@ -685,7 +641,12 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, if (count > max_count) count = max_count; - buf = kmalloc(count, GFP_KERNEL); + /* set pos to 4 bytes aligned */ + unaligned_bytes = pos % 4; + aligned_pos = pos - unaligned_bytes; + aligned_count = count + unaligned_bytes; + + buf = kmalloc(aligned_count, GFP_KERNEL); if (!buf) return -ENOMEM; @@ -696,9 +657,9 @@ static ssize_t wil_read_file_ioblob(struct file *file, char __user *user_buf, } wil_memcpy_fromio_32(buf, (const void __iomem *) - wil_blob->blob.data + pos, count); + wil_blob->blob.data + aligned_pos, aligned_count); - ret = copy_to_user(user_buf, buf, count); + ret = copy_to_user(user_buf, buf + unaligned_bytes, count); wil_pm_runtime_put(wil); @@ -962,6 +923,8 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf, int rc; void *frame; + memset(&params, 0, sizeof(params)); + if (!len) return -EINVAL; @@ -1053,7 +1016,7 @@ static void wil_seq_print_skb(struct seq_file *s, struct sk_buff *skb) } /*---------Tx/Rx descriptor------------*/ -static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) +static int txdesc_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wil_ring *ring; @@ -1146,21 +1109,10 @@ static int wil_txdesc_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_txdesc_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_txdesc_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_txdesc = { - .open = wil_txdesc_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(txdesc); /*---------Tx/Rx status message------------*/ -static int wil_status_msg_debugfs_show(struct seq_file *s, void *data) +static int status_msg_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; int sring_idx = dbg_sring_index; @@ -1202,19 +1154,7 @@ static int wil_status_msg_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int
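/*
 * The wil_read_file_ioblob() change above exists because
 * wil_memcpy_fromio_32() transfers 32-bit words, so the IO read must start
 * on a 4-byte boundary. A worked example for a read of count = 10 bytes at
 * *ppos = 6:
 *
 *   unaligned_bytes = 6 % 4   = 2
 *   aligned_pos     = 6 - 2   = 4
 *   aligned_count   = 10 + 2  = 12
 *
 * Twelve bytes are fetched starting at offset 4, and copy_to_user() skips
 * the first two, returning exactly the 10 bytes the caller asked for.
 */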
wil_status_msg_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_status_msg_debugfs_show, - inode->i_private); -} - -static const struct file_operations fops_status_msg = { - .open = wil_status_msg_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(status_msg); static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh) { @@ -1232,7 +1172,7 @@ static int wil_print_rx_buff(struct seq_file *s, struct list_head *lh) return i; } -static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data) +static int rx_buff_mgmt_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wil_rx_buff_mgmt *rbm = &wil->rx_buff_mgmt; @@ -1257,19 +1197,7 @@ static int wil_rx_buff_mgmt_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_rx_buff_mgmt_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_rx_buff_mgmt_debugfs_show, - inode->i_private); -} - -static const struct file_operations fops_rx_buff_mgmt = { - .open = wil_rx_buff_mgmt_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(rx_buff_mgmt); /*---------beamforming------------*/ static char *wil_bfstatus_str(u32 status) @@ -1299,7 +1227,7 @@ static bool is_all_zeros(void * const x_, size_t sz) return true; } -static int wil_bf_debugfs_show(struct seq_file *s, void *data) +static int bf_show(struct seq_file *s, void *data) { int rc; int i; @@ -1353,18 +1281,7 @@ static int wil_bf_debugfs_show(struct seq_file *s, void *data) } return 0; } - -static int wil_bf_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_bf_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_bf = { - .open = wil_bf_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(bf); /*---------temp------------*/ static void print_temp(struct seq_file *s, const char *prefix, s32 t) @@ -1381,7 +1298,7 @@ static void print_temp(struct seq_file *s, const char *prefix, s32 t) } } -static int wil_temp_debugfs_show(struct seq_file *s, void *data) +static int temp_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; s32 t_m, t_r; @@ -1397,21 +1314,10 @@ static int wil_temp_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_temp_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_temp_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_temp = { - .open = wil_temp_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(temp); /*---------freq------------*/ -static int wil_freq_debugfs_show(struct seq_file *s, void *data) +static int freq_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wireless_dev *wdev = wil->main_ndev->ieee80211_ptr; @@ -1421,21 +1327,10 @@ static int wil_freq_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_freq_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_freq_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_freq = { - .open = wil_freq_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(freq); /*---------link------------*/ -static int wil_link_debugfs_show(struct seq_file *s, void 
*data) +static int link_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct station_info *sinfo; @@ -1487,21 +1382,10 @@ out: kfree(sinfo); return rc; } - -static int wil_link_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_link_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_link = { - .open = wil_link_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(link); /*---------info------------*/ -static int wil_info_debugfs_show(struct seq_file *s, void *data) +static int info_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct net_device *ndev = wil->main_ndev; @@ -1536,18 +1420,7 @@ static int wil_info_debugfs_show(struct seq_file *s, void *data) #undef CHECK_QSTATE return 0; } - -static int wil_info_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_info_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_info = { - .open = wil_info_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(info); /*---------recovery------------*/ /* mode = [manual|auto] @@ -1663,7 +1536,7 @@ has_keys: seq_puts(s, "\n"); } -static int wil_sta_debugfs_show(struct seq_file *s, void *data) +static int sta_show(struct seq_file *s, void *data) __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) { struct wil6210_priv *wil = s->private; @@ -1745,20 +1618,9 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) return 0; } +DEFINE_SHOW_ATTRIBUTE(sta); -static int wil_sta_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_sta_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_sta = { - .open = wil_sta_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; - -static int wil_mids_debugfs_show(struct seq_file *s, void *data) +static int mids_show(struct seq_file *s, void *data) { struct wil6210_priv *wil = s->private; struct wil6210_vif *vif; @@ -1781,18 +1643,7 @@ static int wil_mids_debugfs_show(struct seq_file *s, void *data) return 0; } - -static int wil_mids_seq_open(struct inode *inode, struct file *file) -{ - return single_open(file, wil_mids_debugfs_show, inode->i_private); -} - -static const struct file_operations fops_mids = { - .open = wil_mids_seq_open, - .release = single_release, - .read = seq_read, - .llseek = seq_lseek, -}; +DEFINE_SHOW_ATTRIBUTE(mids); static int wil_tx_latency_debugfs_show(struct seq_file *s, void *data) __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock) @@ -2436,23 +2287,23 @@ static const struct { umode_t mode; const struct file_operations *fops; } dbg_files[] = { - {"mbox", 0444, &fops_mbox}, - {"rings", 0444, &fops_ring}, - {"stations", 0444, &fops_sta}, - {"mids", 0444, &fops_mids}, - {"desc", 0444, &fops_txdesc}, - {"bf", 0444, &fops_bf}, - {"mem_val", 0644, &fops_memread}, + {"mbox", 0444, &mbox_fops}, + {"rings", 0444, &ring_fops}, + {"stations", 0444, &sta_fops}, + {"mids", 0444, &mids_fops}, + {"desc", 0444, &txdesc_fops}, + {"bf", 0444, &bf_fops}, + {"mem_val", 0644, &memread_fops}, {"rxon", 0244, &fops_rxon}, {"tx_mgmt", 0244, &fops_txmgmt}, {"wmi_send", 0244, &fops_wmi}, {"back", 0644, &fops_back}, {"pmccfg", 0644, &fops_pmccfg}, {"pmcdata", 0444, &fops_pmcdata}, - {"temp", 0444, &fops_temp}, - {"freq", 0444, &fops_freq}, - {"link", 0444, &fops_link}, - {"info", 0444, 
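/*
 * Each entry in this dbg_files[] table pairs a debugfs file name and mode
 * with its backing file_operations; registration is a plain loop over the
 * table (a sketch of the usual shape, not part of this hunk):
 *
 *   for (i = 0; i < ARRAY_SIZE(dbg_files); i++)
 *           debugfs_create_file(dbg_files[i].name, dbg_files[i].mode,
 *                               dbg, wil, dbg_files[i].fops);
 *
 * so the fops renames only touch this table, not the file names visible
 * under /sys/kernel/debug.
 */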
&fops_info}, + {"temp", 0444, &temp_fops}, + {"freq", 0444, &freq_fops}, + {"link", 0444, &link_fops}, + {"info", 0444, &info_fops}, {"recovery", 0644, &fops_recovery}, {"led_cfg", 0644, &fops_led_cfg}, {"led_blink_time", 0644, &fops_led_blink_time}, @@ -2460,9 +2311,9 @@ static const struct { {"fw_version", 0444, &fops_fw_version}, {"suspend_stats", 0644, &fops_suspend_stats}, {"compressed_rx_status", 0644, &fops_compressed_rx_status}, - {"srings", 0444, &fops_srings}, - {"status_msg", 0444, &fops_status_msg}, - {"rx_buff_mgmt", 0444, &fops_rx_buff_mgmt}, + {"srings", 0444, &srings_fops}, + {"status_msg", 0444, &status_msg_fops}, + {"rx_buff_mgmt", 0444, &rx_buff_mgmt_fops}, {"tx_latency", 0644, &fops_tx_latency}, {"link_stats", 0644, &fops_link_stats}, {"link_stats_global", 0644, &fops_link_stats_global}, diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c index 398900a1c29e..5b7de00affe2 100644 --- a/drivers/net/wireless/ath/wil6210/main.c +++ b/drivers/net/wireless/ath/wil6210/main.c @@ -18,6 +18,7 @@ #include <linux/moduleparam.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> +#include <linux/rtnetlink.h> #include "wil6210.h" #include "txrx.h" @@ -80,7 +81,7 @@ static const struct kernel_param_ops mtu_max_ops = { module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444); MODULE_PARM_DESC(mtu_max, " Max MTU value."); -static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT; +static uint rx_ring_order; static uint tx_ring_order = WIL_TX_RING_SIZE_ORDER_DEFAULT; static uint bcast_ring_order = WIL_BCAST_RING_SIZE_ORDER_DEFAULT; @@ -214,8 +215,21 @@ static void wil_ring_fini_tx(struct wil6210_priv *wil, int id) wil->txrx_ops.ring_fini_tx(wil, ring); } -static void wil_disconnect_cid(struct wil6210_vif *vif, int cid, - u16 reason_code, bool from_event) +static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid) +{ + int i; + + for (i = 0; i < WIL6210_MAX_CID; i++) { + if (wil->sta[i].mid == mid && + wil->sta[i].status == wil_sta_connected) + return true; + } + + return false; +} + +static void wil_disconnect_cid_complete(struct wil6210_vif *vif, int cid, + u16 reason_code) __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) { uint i; @@ -226,24 +240,14 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) int min_ring_id = wil_get_min_tx_ring_id(wil); might_sleep(); - wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n", + wil_dbg_misc(wil, + "disconnect_cid_complete: CID %d, MID %d, status %d\n", cid, sta->mid, sta->status); - /* inform upper/lower layers */ + /* inform upper layers */ if (sta->status != wil_sta_unused) { if (vif->mid != sta->mid) { wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", vif->mid); - /* let FW override sta->mid but be more strict with - * user space requests - */ - if (!from_event) - return; - } - if (!from_event) { - bool del_sta = (wdev->iftype == NL80211_IFTYPE_AP) ? 
- disable_ap_sme : false; - wmi_disconnect_sta(vif, sta->addr, reason_code, - true, del_sta); } switch (wdev->iftype) { @@ -283,36 +287,20 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock) sta->stats.tx_latency_min_us = U32_MAX; } -static bool wil_vif_is_connected(struct wil6210_priv *wil, u8 mid) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(wil->sta); i++) { - if (wil->sta[i].mid == mid && - wil->sta[i].status == wil_sta_connected) - return true; - } - - return false; -} - -static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, - u16 reason_code, bool from_event) +static void _wil6210_disconnect_complete(struct wil6210_vif *vif, + const u8 *bssid, u16 reason_code) { struct wil6210_priv *wil = vif_to_wil(vif); int cid = -ENOENT; struct net_device *ndev; struct wireless_dev *wdev; - if (unlikely(!vif)) - return; - ndev = vif_to_ndev(vif); wdev = vif_to_wdev(vif); might_sleep(); - wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid, - reason_code, from_event ? "+" : "-"); + wil_info(wil, "disconnect_complete: bssid=%pM, reason=%d\n", + bssid, reason_code); /* Cases are: * - disconnect single STA, still connected @@ -327,14 +315,15 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, if (bssid && !is_broadcast_ether_addr(bssid) && !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) { cid = wil_find_cid(wil, vif->mid, bssid); - wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n", + wil_dbg_misc(wil, + "Disconnect complete %pM, CID=%d, reason=%d\n", bssid, cid, reason_code); if (cid >= 0) /* disconnect 1 peer */ - wil_disconnect_cid(vif, cid, reason_code, from_event); + wil_disconnect_cid_complete(vif, cid, reason_code); } else { /* all */ - wil_dbg_misc(wil, "Disconnect all\n"); + wil_dbg_misc(wil, "Disconnect complete all\n"); for (cid = 0; cid < WIL6210_MAX_CID; cid++) - wil_disconnect_cid(vif, cid, reason_code, from_event); + wil_disconnect_cid_complete(vif, cid, reason_code); } /* link state */ @@ -380,6 +369,82 @@ static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, } } +static int wil_disconnect_cid(struct wil6210_vif *vif, int cid, + u16 reason_code) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + struct wireless_dev *wdev = vif_to_wdev(vif); + struct wil_sta_info *sta = &wil->sta[cid]; + bool del_sta = false; + + might_sleep(); + wil_dbg_misc(wil, "disconnect_cid: CID %d, MID %d, status %d\n", + cid, sta->mid, sta->status); + + if (sta->status == wil_sta_unused) + return 0; + + if (vif->mid != sta->mid) { + wil_err(wil, "STA MID mismatch with VIF MID(%d)\n", vif->mid); + return -EINVAL; + } + + /* inform lower layers */ + if (wdev->iftype == NL80211_IFTYPE_AP && disable_ap_sme) + del_sta = true; + + /* disconnect by sending command disconnect/del_sta and wait + * synchronously for WMI_DISCONNECT_EVENTID event. 
+ */ + return wmi_disconnect_sta(vif, sta->addr, reason_code, del_sta); +} + +static void _wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, + u16 reason_code) +{ + struct wil6210_priv *wil; + struct net_device *ndev; + int cid = -ENOENT; + + if (unlikely(!vif)) + return; + + wil = vif_to_wil(vif); + ndev = vif_to_ndev(vif); + + might_sleep(); + wil_info(wil, "disconnect bssid=%pM, reason=%d\n", bssid, reason_code); + + /* Cases are: + * - disconnect single STA, still connected + * - disconnect single STA, already disconnected + * - disconnect all + * + * For "disconnect all", there are 3 options: + * - bssid == NULL + * - bssid is broadcast address (ff:ff:ff:ff:ff:ff) + * - bssid is our MAC address + */ + if (bssid && !is_broadcast_ether_addr(bssid) && + !ether_addr_equal_unaligned(ndev->dev_addr, bssid)) { + cid = wil_find_cid(wil, vif->mid, bssid); + wil_dbg_misc(wil, "Disconnect %pM, CID=%d, reason=%d\n", + bssid, cid, reason_code); + if (cid >= 0) /* disconnect 1 peer */ + wil_disconnect_cid(vif, cid, reason_code); + } else { /* all */ + wil_dbg_misc(wil, "Disconnect all\n"); + for (cid = 0; cid < WIL6210_MAX_CID; cid++) + wil_disconnect_cid(vif, cid, reason_code); + } + + /* call event handler manually after processing wmi_call, + * to avoid deadlock - disconnect event handler acquires + * wil->mutex while it is already held here + */ + _wil6210_disconnect_complete(vif, bssid, reason_code); +} + void wil_disconnect_worker(struct work_struct *work) { struct wil6210_vif *vif = container_of(work, @@ -485,10 +550,11 @@ static void wil_fw_error_worker(struct work_struct *work) if (wil_wait_for_recovery(wil) != 0) return; + rtnl_lock(); mutex_lock(&wil->mutex); /* Needs adaptation for multiple VIFs * need to go over all VIFs and consider the appropriate - * recovery. + * recovery because each one can have different iftype. */ switch (wdev->iftype) { case NL80211_IFTYPE_STATION: @@ -500,15 +566,24 @@ static void wil_fw_error_worker(struct work_struct *work) break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: - wil_info(wil, "No recovery for AP-like interface\n"); - /* recovery in these modes is done by upper layers */ + if (no_fw_recovery) /* upper layers do recovery */ + break; + /* silent recovery, upper layers will see disconnect */ + __wil_down(wil); + __wil_up(wil); + mutex_unlock(&wil->mutex); + wil_cfg80211_ap_recovery(wil); + mutex_lock(&wil->mutex); + wil_info(wil, "... completed\n"); break; default: wil_err(wil, "No recovery - unknown interface type %d\n", wdev->iftype); break; } + mutex_unlock(&wil->mutex); + rtnl_unlock(); } static int wil_find_free_ring(struct wil6210_priv *wil) @@ -694,20 +769,41 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps) * @vif: virtual interface context * @bssid: peer to disconnect, NULL to disconnect all * @reason_code: Reason code for the Disassociation frame - * @from_event: whether is invoked from FW event handler * - * Disconnect and release associated resources. If invoked not from the - * FW event handler, issue WMI command(s) to trigger MAC disconnect. + * Disconnect and release associated resources. Issue WMI + * command(s) to trigger MAC disconnect. 
When command was issued + * successfully, call the wil6210_disconnect_complete function + * to handle the event synchronously */ void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, - u16 reason_code, bool from_event) + u16 reason_code) +{ + struct wil6210_priv *wil = vif_to_wil(vif); + + wil_dbg_misc(wil, "disconnecting\n"); + + del_timer_sync(&vif->connect_timer); + _wil6210_disconnect(vif, bssid, reason_code); +} + +/** + * wil6210_disconnect_complete - handle disconnect event + * @vif: virtual interface context + * @bssid: peer to disconnect, NULL to disconnect all + * @reason_code: Reason code for the Disassociation frame + * + * Release associated resources and indicate upper layers the + * connection is terminated. + */ +void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid, + u16 reason_code) { struct wil6210_priv *wil = vif_to_wil(vif); - wil_dbg_misc(wil, "disconnect\n"); + wil_dbg_misc(wil, "got disconnect\n"); del_timer_sync(&vif->connect_timer); - _wil6210_disconnect(vif, bssid, reason_code, from_event); + _wil6210_disconnect_complete(vif, bssid, reason_code); } void wil_priv_deinit(struct wil6210_priv *wil) @@ -998,10 +1094,13 @@ static int wil_target_reset(struct wil6210_priv *wil, int no_flash) wil_dbg_misc(wil, "Resetting \"%s\"...\n", wil->hw_name); - /* Clear MAC link up */ - wil_s(wil, RGF_HP_CTRL, BIT(15)); - wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_HPAL_PERST_FROM_PAD); - wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + if (wil->hw_version < HW_VER_TALYN) { + /* Clear MAC link up */ + wil_s(wil, RGF_HP_CTRL, BIT(15)); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, + BIT_HPAL_PERST_FROM_PAD); + wil_s(wil, RGF_USER_CLKS_CTL_SW_RST_MASK_0, BIT_CAR_PERST_RST); + } wil_halt_cpu(wil); @@ -1398,8 +1497,15 @@ static void wil_pre_fw_config(struct wil6210_priv *wil) wil6210_clear_irq(wil); /* CAF_ICR - clear and mask */ /* it is W1C, clear by writing back same value */ - wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); - wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + if (wil->hw_version < HW_VER_TALYN_MB) { + wil_s(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR + offsetof(struct RGF_ICR, IMV), ~0); + } else { + wil_s(wil, + RGF_CAF_ICR_TALYN_MB + offsetof(struct RGF_ICR, ICR), 0); + wil_w(wil, RGF_CAF_ICR_TALYN_MB + + offsetof(struct RGF_ICR, IMV), ~0); + } /* clear PAL_UNIT_ICR (potential D0->D3 leftover) * In Talyn-MB host cannot access this register due to * access control, hence PAL_UNIT_ICR is cleared by the FW @@ -1511,7 +1617,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw) if (vif) { cancel_work_sync(&vif->disconnect_worker); wil6210_disconnect(vif, NULL, - WLAN_REASON_DEAUTH_LEAVING, false); + WLAN_REASON_DEAUTH_LEAVING); } } wil_bcast_fini_all(wil); @@ -1681,7 +1787,12 @@ int __wil_up(struct wil6210_priv *wil) return rc; /* Rx RING. After MAC and beacon */ - rc = wil->txrx_ops.rx_init(wil, 1 << rx_ring_order); + if (rx_ring_order == 0) + rx_ring_order = wil->hw_version < HW_VER_TALYN_MB ? 
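/*
 * Ring sizes in this driver are expressed as orders: entries = 1 << order
 * (wil_rx_init() below does vring->size = 1 << order). With the defaults
 * from wil6210.h that makes the deferred choice above concrete: hardware
 * older than Talyn-MB keeps order 10 (1024 RX descriptors) while Talyn-MB
 * gets order 11 (2048). Leaving the rx_ring_order module parameter at its
 * new default of 0 means "pick per hardware", which is why the initializer
 * moved from the declaration into __wil_up().
 */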
+ WIL_RX_RING_SIZE_ORDER_DEFAULT : + WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT; + + rc = wil->txrx_ops.rx_init(wil, rx_ring_order); if (rc) return rc; diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c index 7a78a06bd356..b4e0eb1585b9 100644 --- a/drivers/net/wireless/ath/wil6210/netdev.c +++ b/drivers/net/wireless/ath/wil6210/netdev.c @@ -345,8 +345,7 @@ wil_vif_alloc(struct wil6210_priv *wil, const char *name, ndev->ieee80211_ptr = wdev; ndev->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GRO | - NETIF_F_TSO | NETIF_F_TSO6 | - NETIF_F_RXHASH; + NETIF_F_TSO | NETIF_F_TSO6; ndev->features |= ndev->hw_features; SET_NETDEV_DEV(ndev, wiphy_dev(wdev->wiphy)); @@ -513,7 +512,7 @@ void wil_vif_remove(struct wil6210_priv *wil, u8 mid) } mutex_lock(&wil->mutex); - wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING, false); + wil6210_disconnect(vif, NULL, WLAN_REASON_DEAUTH_LEAVING); mutex_unlock(&wil->mutex); ndev = vif_to_ndev(vif); diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c index cc5f263cc965..3e1c831ab2fb 100644 --- a/drivers/net/wireless/ath/wil6210/txrx.c +++ b/drivers/net/wireless/ath/wil6210/txrx.c @@ -743,14 +743,6 @@ void wil_netif_rx_any(struct sk_buff *skb, struct net_device *ndev) stats = &wil->sta[cid].stats; - if (ndev->features & NETIF_F_RXHASH) - /* fake L4 to ensure it won't be re-calculated later - * set hash to any non-zero value to activate rps - * mechanism, core will be chosen according - * to user-level rps configuration. - */ - skb_set_hash(skb, 1, PKT_HASH_TYPE_L4); - skb_orphan(skb); if (security && (wil->txrx_ops.rx_crypto_check(wil, skb) != 0)) { @@ -880,7 +872,7 @@ static void wil_rx_buf_len_init(struct wil6210_priv *wil) } } -static int wil_rx_init(struct wil6210_priv *wil, u16 size) +static int wil_rx_init(struct wil6210_priv *wil, uint order) { struct wil_ring *vring = &wil->ring_rx; int rc; @@ -894,7 +886,7 @@ static int wil_rx_init(struct wil6210_priv *wil, u16 size) wil_rx_buf_len_init(wil); - vring->size = size; + vring->size = 1 << order; vring->is_rx = true; rc = wil_vring_alloc(wil, vring); if (rc) @@ -1403,6 +1395,8 @@ found: wil_dbg_txrx(wil, "BCAST DUP -> ring %d\n", i); wil_set_da_for_vring(wil, skb2, i); wil_tx_ring(wil, vif, v2, skb2); + /* successful call to wil_tx_ring takes skb2 ref */ + dev_kfree_skb_any(skb2); } else { wil_err(wil, "skb_copy failed\n"); } diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.c b/drivers/net/wireless/ath/wil6210/txrx_edma.c index 2bbae75b9a84..05a8348bd7b9 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.c +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.c @@ -160,7 +160,7 @@ static int wil_ring_alloc_skb_edma(struct wil6210_priv *wil, struct wil_ring *ring, u32 i) { struct device *dev = wil_to_dev(wil); - unsigned int sz = ALIGN(wil->rx_buf_len, 4); + unsigned int sz = wil->rx_buf_len; dma_addr_t pa; u16 buff_id; struct list_head *active = &wil->rx_buff_mgmt.active; @@ -234,9 +234,10 @@ static int wil_rx_refill_edma(struct wil6210_priv *wil) struct wil_ring *ring = &wil->ring_rx; u32 next_head; int rc = 0; - u32 swtail = *ring->edma_rx_swtail.va; + ring->swtail = *ring->edma_rx_swtail.va; - for (; next_head = wil_ring_next_head(ring), (next_head != swtail); + for (; next_head = wil_ring_next_head(ring), + (next_head != ring->swtail); ring->swhead = next_head) { rc = wil_ring_alloc_skb_edma(wil, ring, ring->swhead); if (unlikely(rc)) { @@ -264,43 +265,26 @@ static void 
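/*
 * The rewritten wil_move_all_rx_buff_to_free_list() below no longer walks
 * RX descriptors to find live buffers; it drains rx_buff_mgmt.active
 * directly, so buffers that never made it into a descriptor are released
 * too. The DMA address needed for dma_unmap_single() is recovered from
 * skb->cb, which assumes the refill path stashed it there, i.e. the
 * equivalent of:
 *
 *   pa = dma_map_single(dev, skb->data, wil->rx_buf_len, DMA_FROM_DEVICE);
 *   memcpy(skb->cb, &pa, sizeof(pa));
 *
 * and that every mapping used the same wil->rx_buf_len length.
 */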
wil_move_all_rx_buff_to_free_list(struct wil6210_priv *wil, struct wil_ring *ring) { struct device *dev = wil_to_dev(wil); - u32 next_tail; - u32 swhead = (ring->swhead + 1) % ring->size; + struct list_head *active = &wil->rx_buff_mgmt.active; dma_addr_t pa; - u16 dmalen; - for (; next_tail = wil_ring_next_tail(ring), (next_tail != swhead); - ring->swtail = next_tail) { - struct wil_rx_enhanced_desc dd, *d = &dd; - struct wil_rx_enhanced_desc *_d = - (struct wil_rx_enhanced_desc *) - &ring->va[ring->swtail].rx.enhanced; - struct sk_buff *skb; - u16 buff_id; + while (!list_empty(active)) { + struct wil_rx_buff *rx_buff = + list_first_entry(active, struct wil_rx_buff, list); + struct sk_buff *skb = rx_buff->skb; - *d = *_d; - - /* Extract the SKB from the rx_buff management array */ - buff_id = __le16_to_cpu(d->mac.buff_id); - if (buff_id >= wil->rx_buff_mgmt.size) { - wil_err(wil, "invalid buff_id %d\n", buff_id); - continue; - } - skb = wil->rx_buff_mgmt.buff_arr[buff_id].skb; - wil->rx_buff_mgmt.buff_arr[buff_id].skb = NULL; if (unlikely(!skb)) { - wil_err(wil, "No Rx skb at buff_id %d\n", buff_id); + wil_err(wil, "No Rx skb at buff_id %d\n", rx_buff->id); } else { - pa = wil_rx_desc_get_addr_edma(&d->dma); - dmalen = le16_to_cpu(d->dma.length); - dma_unmap_single(dev, pa, dmalen, DMA_FROM_DEVICE); - + rx_buff->skb = NULL; + memcpy(&pa, skb->cb, sizeof(pa)); + dma_unmap_single(dev, pa, wil->rx_buf_len, + DMA_FROM_DEVICE); kfree_skb(skb); } /* Move the buffer from the active to the free list */ - list_move(&wil->rx_buff_mgmt.buff_arr[buff_id].list, - &wil->rx_buff_mgmt.free); + list_move(&rx_buff->list, &wil->rx_buff_mgmt.free); } } @@ -357,8 +341,8 @@ static int wil_init_rx_sring(struct wil6210_priv *wil, struct wil_status_ring *sring = &wil->srings[ring_id]; int rc; - wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", sring->size, - ring_id); + wil_dbg_misc(wil, "init RX sring: size=%u, ring_id=%u\n", + status_ring_size, ring_id); memset(&sring->rx_data, 0, sizeof(sring->rx_data)); @@ -602,20 +586,20 @@ static bool wil_is_rx_idle_edma(struct wil6210_priv *wil) static void wil_rx_buf_len_init_edma(struct wil6210_priv *wil) { + /* RX buffer size must be aligned to 4 bytes */ wil->rx_buf_len = rx_large_buf ? WIL_MAX_ETH_MTU : WIL_EDMA_RX_BUF_LEN_DEFAULT; } -static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) +static int wil_rx_init_edma(struct wil6210_priv *wil, uint desc_ring_order) { - u16 status_ring_size; + u16 status_ring_size, desc_ring_size = 1 << desc_ring_order; struct wil_ring *ring = &wil->ring_rx; int rc; size_t elem_size = wil->use_compressed_rx_status ?
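/*
 * wil_rx_init_edma() now derives its sizing from the descriptor ring
 * order. With the new Talyn default order of 11, the checks below work
 * out to:
 *
 *   desc_ring_size       = 1 << 11     = 2048 entries
 *   rx_status_ring_order >= 11 + 1, so the default grows to 12
 *   rx_buff_id_count     >= 2048 + 512 = 2560 buffer IDs
 *
 * which is also why WIL_RX_BUFF_ARR_SIZE_DEFAULT is raised to 2600 in
 * txrx_edma.h: the buffer-ID pool must cover every descriptor in flight,
 * with some headroom.
 */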
sizeof(struct wil_rx_status_compressed) : sizeof(struct wil_rx_status_extended); int i; - u16 max_rx_pl_per_desc; /* In SW reorder one must use extended status messages */ if (wil->use_compressed_rx_status && !wil->use_rx_hw_reordering) { @@ -623,7 +607,12 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) "compressed RX status cannot be used with SW reorder\n"); return -EINVAL; } - + if (wil->rx_status_ring_order <= desc_ring_order) + /* make sure sring is larger than desc ring */ + wil->rx_status_ring_order = desc_ring_order + 1; + if (wil->rx_buff_id_count <= desc_ring_size) + /* make sure we will not run out of buff_ids */ + wil->rx_buff_id_count = desc_ring_size + 512; if (wil->rx_status_ring_order < WIL_SRING_SIZE_ORDER_MIN || wil->rx_status_ring_order > WIL_SRING_SIZE_ORDER_MAX) wil->rx_status_ring_order = WIL_RX_SRING_SIZE_ORDER_DEFAULT; @@ -636,8 +625,6 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) wil_rx_buf_len_init_edma(wil); - max_rx_pl_per_desc = ALIGN(wil->rx_buf_len, 4); - /* Use debugfs dbg_num_rx_srings if set, reserve one sring for TX */ if (wil->num_rx_status_rings > WIL6210_MAX_STATUS_RINGS - 1) wil->num_rx_status_rings = WIL6210_MAX_STATUS_RINGS - 1; @@ -645,7 +632,7 @@ static int wil_rx_init_edma(struct wil6210_priv *wil, u16 desc_ring_size) wil_dbg_misc(wil, "rx_init: allocate %d status rings\n", wil->num_rx_status_rings); - rc = wil_wmi_cfg_def_rx_offload(wil, max_rx_pl_per_desc); + rc = wil_wmi_cfg_def_rx_offload(wil, wil->rx_buf_len); if (rc) return rc; @@ -834,23 +821,24 @@ static int wil_rx_error_check_edma(struct wil6210_priv *wil, wil_dbg_txrx(wil, "L2 RX error, l2_rx_status=0x%x\n", l2_rx_status); /* Due to HW issue, KEY error will trigger a MIC error */ - if (l2_rx_status & WIL_RX_EDMA_ERROR_MIC) { - wil_dbg_txrx(wil, - "L2 MIC/KEY error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_MIC) { + wil_err_ratelimited(wil, + "L2 MIC/KEY error, dropping packet\n"); stats->rx_mic_error++; } - if (l2_rx_status & WIL_RX_EDMA_ERROR_KEY) { - wil_dbg_txrx(wil, "L2 KEY error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_KEY) { + wil_err_ratelimited(wil, + "L2 KEY error, dropping packet\n"); stats->rx_key_error++; } - if (l2_rx_status & WIL_RX_EDMA_ERROR_REPLAY) { - wil_dbg_txrx(wil, - "L2 REPLAY error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_REPLAY) { + wil_err_ratelimited(wil, + "L2 REPLAY error, dropping packet\n"); stats->rx_replay++; } - if (l2_rx_status & WIL_RX_EDMA_ERROR_AMSDU) { - wil_dbg_txrx(wil, - "L2 AMSDU error, dropping packet\n"); + if (l2_rx_status == WIL_RX_EDMA_ERROR_AMSDU) { + wil_err_ratelimited(wil, + "L2 AMSDU error, dropping packet\n"); stats->rx_amsdu_error++; } return -EFAULT; @@ -881,7 +869,7 @@ static struct sk_buff *wil_sring_reap_rx_edma(struct wil6210_priv *wil, struct sk_buff *skb; dma_addr_t pa; struct wil_ring_rx_data *rxdata = &sring->rx_data; - unsigned int sz = ALIGN(wil->rx_buf_len, 4); + unsigned int sz = wil->rx_buf_len; struct wil_net_stats *stats = NULL; u16 dmalen; int cid; diff --git a/drivers/net/wireless/ath/wil6210/txrx_edma.h b/drivers/net/wireless/ath/wil6210/txrx_edma.h index a7fe9292fda3..343516a03a1e 100644 --- a/drivers/net/wireless/ath/wil6210/txrx_edma.h +++ b/drivers/net/wireless/ath/wil6210/txrx_edma.h @@ -23,9 +23,9 @@ #define WIL_SRING_SIZE_ORDER_MIN (WIL_RING_SIZE_ORDER_MIN) #define WIL_SRING_SIZE_ORDER_MAX (WIL_RING_SIZE_ORDER_MAX) /* RX sring order should be bigger than RX ring order */ -#define 
WIL_RX_SRING_SIZE_ORDER_DEFAULT (11) +#define WIL_RX_SRING_SIZE_ORDER_DEFAULT (12) #define WIL_TX_SRING_SIZE_ORDER_DEFAULT (12) -#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (1536) +#define WIL_RX_BUFF_ARR_SIZE_DEFAULT (2600) #define WIL_DEFAULT_RX_STATUS_RING_ID 0 #define WIL_RX_DESC_RING_ID 0 diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h index abb82018d3b4..0f3be3ffc6a2 100644 --- a/drivers/net/wireless/ath/wil6210/wil6210.h +++ b/drivers/net/wireless/ath/wil6210/wil6210.h @@ -81,6 +81,7 @@ static inline u32 WIL_GET_BITS(u32 x, int b0, int b1) #define WIL_TX_Q_LEN_DEFAULT (4000) #define WIL_RX_RING_SIZE_ORDER_DEFAULT (10) +#define WIL_RX_RING_SIZE_ORDER_TALYN_DEFAULT (11) #define WIL_TX_RING_SIZE_ORDER_DEFAULT (12) #define WIL_BCAST_RING_SIZE_ORDER_DEFAULT (7) #define WIL_BCAST_MCS0_LIMIT (1024) /* limit for MCS0 frame size */ @@ -319,6 +320,7 @@ struct RGF_ICR { /* MAC timer, usec, for packet lifetime */ #define RGF_MAC_MTRL_COUNTER_0 (0x886aa8) +#define RGF_CAF_ICR_TALYN_MB (0x8893d4) /* struct RGF_ICR */ #define RGF_CAF_ICR (0x88946c) /* struct RGF_ICR */ #define RGF_CAF_OSC_CONTROL (0x88afa4) #define BIT_CAF_OSC_XTAL_EN BIT(0) @@ -613,7 +615,7 @@ struct wil_txrx_ops { int cid, int tid); irqreturn_t (*irq_tx)(int irq, void *cookie); /* RX ops */ - int (*rx_init)(struct wil6210_priv *wil, u16 ring_size); + int (*rx_init)(struct wil6210_priv *wil, uint ring_order); void (*rx_fini)(struct wil6210_priv *wil); int (*wmi_addba_rx_resp)(struct wil6210_priv *wil, u8 mid, u8 cid, u8 tid, u8 token, u16 status, bool amsdu, @@ -848,6 +850,14 @@ struct wil6210_vif { u8 hidden_ssid; /* relevant in AP mode */ u32 ap_isolate; /* no intra-BSS communication */ bool pbss; + int bi; + u8 *proberesp, *proberesp_ies, *assocresp_ies; + size_t proberesp_len, proberesp_ies_len, assocresp_ies_len; + u8 ssid[IEEE80211_MAX_SSID_LEN]; + size_t ssid_len; + u8 gtk_index; + u8 gtk[WMI_MAX_KEY_LEN]; + size_t gtk_len; int bcast_ring; struct cfg80211_bss *bss; /* connected bss, relevant in STA mode */ int locally_generated_disc; /* relevant in STA mode */ @@ -1220,8 +1230,8 @@ int wmi_rx_chain_add(struct wil6210_priv *wil, struct wil_ring *vring); int wmi_update_ft_ies(struct wil6210_vif *vif, u16 ie_len, const void *ie); int wmi_rxon(struct wil6210_priv *wil, bool on); int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_m, u32 *t_r); -int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, - u16 reason, bool full_disconnect, bool del_sta); +int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason, + bool del_sta); int wmi_addba(struct wil6210_priv *wil, u8 mid, u8 ringid, u8 size, u16 timeout); int wmi_delba_tx(struct wil6210_priv *wil, u8 mid, u8 ringid, u16 reason); @@ -1276,6 +1286,7 @@ int wmi_stop_discovery(struct wil6210_vif *vif); int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie); +void wil_cfg80211_ap_recovery(struct wil6210_priv *wil); int wil_cfg80211_iface_combinations_from_fw( struct wil6210_priv *wil, const struct wil_fw_record_concurrency *conc); @@ -1306,7 +1317,9 @@ void wil_abort_scan(struct wil6210_vif *vif, bool sync); void wil_abort_scan_all_vifs(struct wil6210_priv *wil, bool sync); void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps); void wil6210_disconnect(struct wil6210_vif *vif, const u8 *bssid, - u16 reason_code, bool from_event); + u16 reason_code); +void wil6210_disconnect_complete(struct wil6210_vif *vif, const u8 *bssid, + u16 
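/*
 * Call flow after this API split, as wired up in main.c and wmi.c: a
 * host-initiated teardown runs wil6210_disconnect() ->
 * wil_disconnect_cid() -> wmi_disconnect_sta(), which waits synchronously
 * for WMI_DISCONNECT_EVENTID, after which _wil6210_disconnect_complete()
 * is called directly (the event handler cannot be used, since it would
 * re-take the already-held wil->mutex). A firmware-initiated disconnect
 * instead enters via wmi_evt_disconnect() ->
 * wil6210_disconnect_complete(). Together these replace the old
 * from_event flag.
 */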
reason_code); void wil_probe_client_flush(struct wil6210_vif *vif); void wil_probe_client_worker(struct work_struct *work); void wil_disconnect_worker(struct work_struct *work); diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c index 4859f0e43658..345f05969190 100644 --- a/drivers/net/wireless/ath/wil6210/wmi.c +++ b/drivers/net/wireless/ath/wil6210/wmi.c @@ -1018,7 +1018,7 @@ static void wmi_evt_connect(struct wil6210_vif *vif, int id, void *d, int len) wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n", evt->cid, rc); wmi_disconnect_sta(vif, wil->sta[evt->cid].addr, - WLAN_REASON_UNSPECIFIED, false, false); + WLAN_REASON_UNSPECIFIED, false); } else { wil_info(wil, "successful connection to CID %d\n", evt->cid); } @@ -1112,7 +1112,24 @@ static void wmi_evt_disconnect(struct wil6210_vif *vif, int id, } mutex_lock(&wil->mutex); - wil6210_disconnect(vif, evt->bssid, reason_code, true); + wil6210_disconnect_complete(vif, evt->bssid, reason_code); + if (disable_ap_sme) { + struct wireless_dev *wdev = vif_to_wdev(vif); + struct net_device *ndev = vif_to_ndev(vif); + + /* disconnect event in disable_ap_sme mode means link loss */ + switch (wdev->iftype) { + /* AP-like interface */ + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + /* notify hostapd about link loss */ + cfg80211_cqm_pktloss_notify(ndev, evt->bssid, 0, + GFP_KERNEL); + break; + default: + break; + } + } mutex_unlock(&wil->mutex); } @@ -1637,7 +1654,7 @@ wmi_evt_auth_status(struct wil6210_vif *vif, int id, void *d, int len) return; fail: - wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false); + wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID); } static void @@ -1766,7 +1783,7 @@ wmi_evt_reassoc_status(struct wil6210_vif *vif, int id, void *d, int len) return; fail: - wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID, false); + wil6210_disconnect(vif, NULL, WLAN_REASON_PREV_AUTH_NOT_VALID); } /** @@ -1949,16 +1966,17 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, { int rc; unsigned long remain; + ulong flags; mutex_lock(&wil->wmi_mutex); - spin_lock(&wil->wmi_ev_lock); + spin_lock_irqsave(&wil->wmi_ev_lock, flags); wil->reply_id = reply_id; wil->reply_mid = mid; wil->reply_buf = reply; wil->reply_size = reply_size; reinit_completion(&wil->wmi_call); - spin_unlock(&wil->wmi_ev_lock); + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); rc = __wmi_send(wil, cmdid, mid, buf, len); if (rc) @@ -1978,12 +1996,12 @@ int wmi_call(struct wil6210_priv *wil, u16 cmdid, u8 mid, void *buf, u16 len, } out: - spin_lock(&wil->wmi_ev_lock); + spin_lock_irqsave(&wil->wmi_ev_lock, flags); wil->reply_id = 0; wil->reply_mid = U8_MAX; wil->reply_buf = NULL; wil->reply_size = 0; - spin_unlock(&wil->wmi_ev_lock); + spin_unlock_irqrestore(&wil->wmi_ev_lock, flags); mutex_unlock(&wil->wmi_mutex); @@ -2560,12 +2578,11 @@ int wmi_get_temperature(struct wil6210_priv *wil, u32 *t_bb, u32 *t_rf) return 0; } -int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, - u16 reason, bool full_disconnect, bool del_sta) +int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, u16 reason, + bool del_sta) { struct wil6210_priv *wil = vif_to_wil(vif); int rc; - u16 reason_code; struct wmi_disconnect_sta_cmd disc_sta_cmd = { .disconnect_reason = cpu_to_le16(reason), }; @@ -2598,21 +2615,8 @@ int wmi_disconnect_sta(struct wil6210_vif *vif, const u8 *mac, wil_fw_error_recovery(wil); return rc; } + wil->sinfo_gen++; - if 
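/*
 * On the wmi_call() change above: wil->wmi_ev_lock guards the reply_id /
 * reply_buf fields that the WMI event path inspects when matching replies.
 * Switching to spin_lock_irqsave() keeps that pairing safe even if the
 * event side runs in interrupt context; that motivation is an inference
 * here, since the hunk itself only shows the locking conversion.
 */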
(full_disconnect) { - /* call event handler manually after processing wmi_call, - * to avoid deadlock - disconnect event handler acquires - * wil->mutex while it is already held here - */ - reason_code = le16_to_cpu(reply.evt.protocol_reason_status); - - wil_dbg_wmi(wil, "Disconnect %pM reason [proto %d wmi %d]\n", - reply.evt.bssid, reason_code, - reply.evt.disconnect_reason); - - wil->sinfo_gen++; - wil6210_disconnect(vif, reply.evt.bssid, reason_code, true); - } return 0; } @@ -3145,7 +3149,7 @@ static void wmi_event_handle(struct wil6210_priv *wil, if (mid == MID_BROADCAST) mid = 0; - if (mid >= wil->max_vifs) { + if (mid >= ARRAY_SIZE(wil->vifs) || mid >= wil->max_vifs) { wil_dbg_wmi(wil, "invalid mid %d, event skipped\n", mid); return; diff --git a/drivers/net/wireless/broadcom/b43/Kconfig b/drivers/net/wireless/broadcom/b43/Kconfig index fba856032ca5..3e4145747b20 100644 --- a/drivers/net/wireless/broadcom/b43/Kconfig +++ b/drivers/net/wireless/broadcom/b43/Kconfig @@ -4,6 +4,7 @@ config B43 select BCMA if B43_BCMA select SSB if B43_SSB select FW_LOADER + select CORDIC ---help--- b43 is a driver for the Broadcom 43xx series wireless devices. diff --git a/drivers/net/wireless/broadcom/b43/phy_common.c b/drivers/net/wireless/broadcom/b43/phy_common.c index 85f2ca989565..98c4fa5b919c 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.c +++ b/drivers/net/wireless/broadcom/b43/phy_common.c @@ -604,50 +604,3 @@ void b43_phy_force_clock(struct b43_wldev *dev, bool force) #endif } } - -/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */ -struct b43_c32 b43_cordic(int theta) -{ - static const u32 arctg[] = { - 2949120, 1740967, 919879, 466945, 234379, 117304, - 58666, 29335, 14668, 7334, 3667, 1833, - 917, 458, 229, 115, 57, 29, - }; - u8 i; - s32 tmp; - s8 signx = 1; - u32 angle = 0; - struct b43_c32 ret = { .i = 39797, .q = 0, }; - - while (theta > (180 << 16)) - theta -= (360 << 16); - while (theta < -(180 << 16)) - theta += (360 << 16); - - if (theta > (90 << 16)) { - theta -= (180 << 16); - signx = -1; - } else if (theta < -(90 << 16)) { - theta += (180 << 16); - signx = -1; - } - - for (i = 0; i <= 17; i++) { - if (theta > angle) { - tmp = ret.i - (ret.q >> i); - ret.q += ret.i >> i; - ret.i = tmp; - angle += arctg[i]; - } else { - tmp = ret.i + (ret.q >> i); - ret.q -= ret.i >> i; - ret.i = tmp; - angle -= arctg[i]; - } - } - - ret.i *= signx; - ret.q *= signx; - - return ret; -} diff --git a/drivers/net/wireless/broadcom/b43/phy_common.h b/drivers/net/wireless/broadcom/b43/phy_common.h index 57a1ad8afa08..4213caca9117 100644 --- a/drivers/net/wireless/broadcom/b43/phy_common.h +++ b/drivers/net/wireless/broadcom/b43/phy_common.h @@ -7,13 +7,6 @@ struct b43_wldev; -/* Complex number using 2 32-bit signed integers */ -struct b43_c32 { s32 i, q; }; - -#define CORDIC_CONVERT(value) (((value) >= 0) ? 
\ - ((((value) >> 15) + 1) >> 1) : \ - -((((-(value)) >> 15) + 1) >> 1)) - /* PHY register routing bits */ #define B43_PHYROUTE 0x0C00 /* PHY register routing bits mask */ #define B43_PHYROUTE_BASE 0x0000 /* Base registers */ @@ -450,6 +443,4 @@ bool b43_is_40mhz(struct b43_wldev *dev); void b43_phy_force_clock(struct b43_wldev *dev, bool force); -struct b43_c32 b43_cordic(int theta); - #endif /* LINUX_B43_PHY_COMMON_H_ */ diff --git a/drivers/net/wireless/broadcom/b43/phy_lp.c b/drivers/net/wireless/broadcom/b43/phy_lp.c index 6922cbb99a04..46408a560814 100644 --- a/drivers/net/wireless/broadcom/b43/phy_lp.c +++ b/drivers/net/wireless/broadcom/b43/phy_lp.c @@ -23,6 +23,7 @@ */ +#include <linux/cordic.h> #include <linux/slab.h> #include "b43.h" @@ -1780,9 +1781,9 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max) { struct b43_phy_lp *lpphy = dev->phy.lp; u16 buf[64]; - int i, samples = 0, angle = 0; + int i, samples = 0, theta = 0; int rotation = (((36 * freq) / 20) << 16) / 100; - struct b43_c32 sample; + struct cordic_iq sample; lpphy->tx_tone_freq = freq; @@ -1798,10 +1799,10 @@ static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max) } for (i = 0; i < samples; i++) { - sample = b43_cordic(angle); - angle += rotation; - buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8; - buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF); + sample = cordic_calc_iq(CORDIC_FIXED(theta)); + theta += rotation; + buf[i] = CORDIC_FLOAT((sample.i * max) & 0xFF) << 8; + buf[i] |= CORDIC_FLOAT((sample.q * max) & 0xFF); } b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf); diff --git a/drivers/net/wireless/broadcom/b43/phy_n.c b/drivers/net/wireless/broadcom/b43/phy_n.c index 44ab080d6518..77d7cd5563c4 100644 --- a/drivers/net/wireless/broadcom/b43/phy_n.c +++ b/drivers/net/wireless/broadcom/b43/phy_n.c @@ -23,6 +23,7 @@ */ +#include <linux/cordic.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/types.h> @@ -1513,7 +1514,7 @@ static void b43_radio_init2055(struct b43_wldev *dev) /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/LoadSampleTable */ static int b43_nphy_load_samples(struct b43_wldev *dev, - struct b43_c32 *samples, u16 len) { + struct cordic_iq *samples, u16 len) { struct b43_phy_n *nphy = dev->phy.n; u16 i; u32 *data; @@ -1544,7 +1545,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, { int i; u16 bw, len, rot, angle; - struct b43_c32 *samples; + struct cordic_iq *samples; bw = b43_is_40mhz(dev) ? 
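/*
 * b43 now uses the shared <linux/cordic.h> fixed-point rotator instead of
 * its private copy. The mapping from the deleted helpers, per that header:
 *
 *   struct b43_c32     ->  struct cordic_iq   (s32 i, q)
 *   b43_cordic(theta)  ->  cordic_calc_iq(CORDIC_FIXED(theta))
 *   CORDIC_CONVERT(x)  ->  CORDIC_FLOAT(x)
 *
 * CORDIC_FIXED(deg) is (deg << 16), so callers keep the same 16.16
 * fixed-point degree units for "rotation" and "theta" as before.
 */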
40 : 20; len = bw << 3; @@ -1561,7 +1562,7 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, len = bw << 1; } - samples = kcalloc(len, sizeof(struct b43_c32), GFP_KERNEL); + samples = kcalloc(len, sizeof(struct cordic_iq), GFP_KERNEL); if (!samples) { b43err(dev->wl, "allocation for samples generation failed\n"); return 0; @@ -1570,10 +1571,10 @@ static u16 b43_nphy_gen_load_samples(struct b43_wldev *dev, u32 freq, u16 max, angle = 0; for (i = 0; i < len; i++) { - samples[i] = b43_cordic(angle); + samples[i] = cordic_calc_iq(CORDIC_FIXED(angle)); angle += rot; - samples[i].q = CORDIC_CONVERT(samples[i].q * max); - samples[i].i = CORDIC_CONVERT(samples[i].i * max); + samples[i].q = CORDIC_FLOAT(samples[i].q * max); + samples[i].i = CORDIC_FLOAT(samples[i].i * max); } i = b43_nphy_load_samples(dev, samples, len); @@ -5894,7 +5895,6 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, struct ieee80211_channel *channel = dev->wl->hw->conf.chandef.chan; struct b43_ppr *ppr = &nphy->tx_pwr_max_ppr; u8 max; /* qdBm */ - bool tx_pwr_state; if (nphy->tx_pwr_last_recalc_freq == channel->center_freq && nphy->tx_pwr_last_recalc_limit == phy->desired_txpower) @@ -5930,7 +5930,6 @@ static enum b43_txpwr_result b43_nphy_op_recalc_txpower(struct b43_wldev *dev, b43_ppr_apply_min(dev, ppr, INT_TO_Q52(8)); /* Apply */ - tx_pwr_state = nphy->txpwrctrl; b43_mac_suspend(dev); b43_nphy_tx_power_ctl_setup(dev); if (dev->dev->core_rev == 11 || dev->dev->core_rev == 12) { @@ -6043,7 +6042,6 @@ static int b43_phy_initn(struct b43_wldev *dev) u8 tx_pwr_state; struct nphy_txgains target; u16 tmp; - enum nl80211_band tmp2; bool do_rssi_cal; u16 clip[2]; @@ -6137,7 +6135,6 @@ static int b43_phy_initn(struct b43_wldev *dev) b43_phy_write(dev, B43_NPHY_DUP40_BL, 0x9A4); } - tmp2 = b43_current_band(dev->wl); if (b43_nphy_ipa(dev)) { b43_phy_set(dev, B43_NPHY_PAPD_EN0, 0x1); b43_phy_maskset(dev, B43_NPHY_EPS_TABLE_ADJ0, 0x007F, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile index 1f5a9b948abf..22fd95a736a8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/Makefile @@ -54,3 +54,5 @@ brcmfmac-$(CONFIG_BRCM_TRACING) += \ tracepoint.o brcmfmac-$(CONFIG_OF) += \ of.o +brcmfmac-$(CONFIG_DMI) += \ + dmi.o diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c index 3e37c8cf82c6..d64bf233b12c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/bcmsdh.c @@ -342,6 +342,37 @@ static int brcmf_sdiod_skbuff_write(struct brcmf_sdio_dev *sdiodev, return err; } +static int mmc_submit_one(struct mmc_data *md, struct mmc_request *mr, + struct mmc_command *mc, int sg_cnt, int req_sz, + int func_blk_sz, u32 *addr, + struct brcmf_sdio_dev *sdiodev, + struct sdio_func *func, int write) +{ + int ret; + + md->sg_len = sg_cnt; + md->blocks = req_sz / func_blk_sz; + mc->arg |= (*addr & 0x1FFFF) << 9; /* address */ + mc->arg |= md->blocks & 0x1FF; /* block count */ + /* incrementing addr for function 1 */ + if (func->num == 1) + *addr += req_sz; + + mmc_set_data_timeout(md, func->card); + mmc_wait_for_req(func->card->host, mr); + + ret = mc->error ? 
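/*
 * mmc_submit_one() only fills in the per-transfer CMD53 argument fields;
 * the R/W flag, function number, block mode and OP-code bits are set once
 * in mc->arg by the caller (unchanged code outside this hunk). Per the
 * SDIO spec, the bits written here are:
 *
 *   mc->arg |= (*addr & 0x1FFFF) << 9;   bits 25:9, register address
 *   mc->arg |= md->blocks & 0x1FF;       bits 8:0,  block count
 *
 * For function 1, the backplane window, the address is advanced by req_sz
 * so the next submission continues where this one ended.
 */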
mc->error : md->error; + if (ret == -ENOMEDIUM) { + brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); + } else if (ret != 0) { + brcmf_err("CMD53 sg block %s failed %d\n", + write ? "write" : "read", ret); + ret = -EIO; + } + + return ret; +} + /** * brcmf_sdiod_sglist_rw - SDIO interface function for block data access * @sdiodev: brcmfmac sdio device @@ -360,11 +391,11 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, struct sk_buff_head *pktlist) { unsigned int req_sz, func_blk_sz, sg_cnt, sg_data_sz, pkt_offset; - unsigned int max_req_sz, orig_offset, dst_offset; - unsigned short max_seg_cnt, seg_sz; + unsigned int max_req_sz, src_offset, dst_offset; unsigned char *pkt_data, *orig_data, *dst_data; - struct sk_buff *pkt_next = NULL, *local_pkt_next; struct sk_buff_head local_list, *target_list; + struct sk_buff *pkt_next = NULL, *src; + unsigned short max_seg_cnt; struct mmc_request mmc_req; struct mmc_command mmc_cmd; struct mmc_data mmc_dat; @@ -404,9 +435,6 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, max_req_sz = sdiodev->max_request_size; max_seg_cnt = min_t(unsigned short, sdiodev->max_segment_count, target_list->qlen); - seg_sz = target_list->qlen; - pkt_offset = 0; - pkt_next = target_list->next; memset(&mmc_req, 0, sizeof(struct mmc_request)); memset(&mmc_cmd, 0, sizeof(struct mmc_command)); @@ -425,12 +453,12 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, mmc_req.cmd = &mmc_cmd; mmc_req.data = &mmc_dat; - while (seg_sz) { - req_sz = 0; - sg_cnt = 0; - sgl = sdiodev->sgtable.sgl; - /* prep sg table */ - while (pkt_next != (struct sk_buff *)target_list) { + req_sz = 0; + sg_cnt = 0; + sgl = sdiodev->sgtable.sgl; + skb_queue_walk(target_list, pkt_next) { + pkt_offset = 0; + while (pkt_offset < pkt_next->len) { pkt_data = pkt_next->data + pkt_offset; sg_data_sz = pkt_next->len - pkt_offset; if (sg_data_sz > sdiodev->max_segment_size) @@ -439,72 +467,55 @@ static int brcmf_sdiod_sglist_rw(struct brcmf_sdio_dev *sdiodev, sg_data_sz = max_req_sz - req_sz; sg_set_buf(sgl, pkt_data, sg_data_sz); - sg_cnt++; + sgl = sg_next(sgl); req_sz += sg_data_sz; pkt_offset += sg_data_sz; - if (pkt_offset == pkt_next->len) { - pkt_offset = 0; - pkt_next = pkt_next->next; + if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) { + ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd, + sg_cnt, req_sz, func_blk_sz, + &addr, sdiodev, func, write); + if (ret) + goto exit_queue_walk; + req_sz = 0; + sg_cnt = 0; + sgl = sdiodev->sgtable.sgl; } - - if (req_sz >= max_req_sz || sg_cnt >= max_seg_cnt) - break; - } - seg_sz -= sg_cnt; - - if (req_sz % func_blk_sz != 0) { - brcmf_err("sg request length %u is not %u aligned\n", - req_sz, func_blk_sz); - ret = -ENOTBLK; - goto exit; - } - - mmc_dat.sg_len = sg_cnt; - mmc_dat.blocks = req_sz / func_blk_sz; - mmc_cmd.arg |= (addr & 0x1FFFF) << 9; /* address */ - mmc_cmd.arg |= mmc_dat.blocks & 0x1FF; /* block count */ - /* incrementing addr for function 1 */ - if (func->num == 1) - addr += req_sz; - - mmc_set_data_timeout(&mmc_dat, func->card); - mmc_wait_for_req(func->card->host, &mmc_req); - - ret = mmc_cmd.error ? mmc_cmd.error : mmc_dat.error; - if (ret == -ENOMEDIUM) { - brcmf_sdiod_change_state(sdiodev, BRCMF_SDIOD_NOMEDIUM); - break; - } else if (ret != 0) { - brcmf_err("CMD53 sg block %s failed %d\n", - write ? 
"write" : "read", ret); - ret = -EIO; - break; } } - + if (sg_cnt) + ret = mmc_submit_one(&mmc_dat, &mmc_req, &mmc_cmd, + sg_cnt, req_sz, func_blk_sz, + &addr, sdiodev, func, write); +exit_queue_walk: if (!write && sdiodev->settings->bus.sdio.broken_sg_support) { - local_pkt_next = local_list.next; - orig_offset = 0; + src = __skb_peek(&local_list); + src_offset = 0; skb_queue_walk(pktlist, pkt_next) { dst_offset = 0; - do { - req_sz = local_pkt_next->len - orig_offset; - req_sz = min_t(uint, pkt_next->len - dst_offset, - req_sz); - orig_data = local_pkt_next->data + orig_offset; + + /* This is safe because we must have enough SKB data + * in the local list to cover everything in pktlist. + */ + while (1) { + req_sz = pkt_next->len - dst_offset; + if (req_sz > src->len - src_offset) + req_sz = src->len - src_offset; + + orig_data = src->data + src_offset; dst_data = pkt_next->data + dst_offset; memcpy(dst_data, orig_data, req_sz); - orig_offset += req_sz; - dst_offset += req_sz; - if (orig_offset == local_pkt_next->len) { - orig_offset = 0; - local_pkt_next = local_pkt_next->next; + + src_offset += req_sz; + if (src_offset == src->len) { + src_offset = 0; + src = skb_peek_next(src, &local_list); } + dst_offset += req_sz; if (dst_offset == pkt_next->len) break; - } while (!skb_queue_empty(&local_list)); + } } } @@ -972,6 +983,7 @@ static const struct sdio_device_id brcmf_sdmmc_ids[] = { BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4354), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_BROADCOM_4356), BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_4373), + BRCMF_SDIO_DEVICE(SDIO_DEVICE_ID_CYPRESS_43012), { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(sdio, brcmf_sdmmc_ids); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c index 7f0a5bade70a..35301237d435 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.c @@ -5196,10 +5196,17 @@ static struct cfg80211_ops brcmf_cfg80211_ops = { .del_pmk = brcmf_cfg80211_del_pmk, }; -struct cfg80211_ops *brcmf_cfg80211_get_ops(void) +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings) { - return kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), + struct cfg80211_ops *ops; + + ops = kmemdup(&brcmf_cfg80211_ops, sizeof(brcmf_cfg80211_ops), GFP_KERNEL); + + if (ops && settings->roamoff) + ops->update_connect_params = NULL; + + return ops; } struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, @@ -6309,6 +6316,16 @@ brcmf_txrx_stypes[NUM_NL80211_IFTYPES] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) + }, + [NL80211_IFTYPE_AP] = { + .tx = 0xffff, + .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | + BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | + BIT(IEEE80211_STYPE_DISASSOC >> 4) | + BIT(IEEE80211_STYPE_AUTH >> 4) | + BIT(IEEE80211_STYPE_DEAUTH >> 4) | + BIT(IEEE80211_STYPE_ACTION >> 4) } }; @@ -6639,6 +6656,12 @@ static s32 brcmf_config_dongle(struct brcmf_cfg80211_info *cfg) brcmf_configure_arp_nd_offload(ifp, true); + err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_FAKEFRAG, 1); + if (err) { + brcmf_err("failed to set frameburst mode\n"); + goto default_conf_out; + } + cfg->dongle_up = true; default_conf_out: diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h index a4aec0004e4f..9a6287f084a9 100644 --- 
a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/cfg80211.h @@ -404,7 +404,7 @@ struct brcmf_cfg80211_info *brcmf_cfg80211_attach(struct brcmf_pub *drvr, void brcmf_cfg80211_detach(struct brcmf_cfg80211_info *cfg); s32 brcmf_cfg80211_up(struct net_device *ndev); s32 brcmf_cfg80211_down(struct net_device *ndev); -struct cfg80211_ops *brcmf_cfg80211_get_ops(void); +struct cfg80211_ops *brcmf_cfg80211_get_ops(struct brcmf_mp_device *settings); enum nl80211_iftype brcmf_cfg80211_get_iftype(struct brcmf_if *ifp); struct brcmf_cfg80211_vif *brcmf_alloc_vif(struct brcmf_cfg80211_info *cfg, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c index 927d62b3d41b..22534bf2a90c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/chip.c @@ -165,6 +165,7 @@ struct sbconfig { #define SRCI_LSS_MASK 0x00f00000 #define SRCI_LSS_SHIFT 20 #define SRCI_SRNB_MASK 0xf0 +#define SRCI_SRNB_MASK_EXT 0x100 #define SRCI_SRNB_SHIFT 4 #define SRCI_SRBSZ_MASK 0xf #define SRCI_SRBSZ_SHIFT 0 @@ -592,7 +593,13 @@ static void brcmf_chip_socram_ramsize(struct brcmf_core_priv *sr, u32 *ramsize, if (lss != 0) *ramsize += (1 << ((lss - 1) + SR_BSZ_BASE)); } else { - nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + /* length of SRAM Banks increased for corerev greater than 23 */ + if (sr->pub.rev >= 23) { + nb = (coreinfo & (SRCI_SRNB_MASK | SRCI_SRNB_MASK_EXT)) + >> SRCI_SRNB_SHIFT; + } else { + nb = (coreinfo & SRCI_SRNB_MASK) >> SRCI_SRNB_SHIFT; + } for (i = 0; i < nb; i++) { retent = brcmf_chip_socram_banksize(sr, i, &banksize); *ramsize += banksize; @@ -779,7 +786,7 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr, u32 *regbase, u32 *wrapbase) { u8 desc; - u32 val; + u32 val, szdesc; u8 mpnum = 0; u8 stype, sztype, wraptype; @@ -825,14 +832,15 @@ static int brcmf_chip_dmp_get_regaddr(struct brcmf_chip_priv *ci, u32 *eromaddr, /* next size descriptor can be skipped */ if (sztype == DMP_SLAVE_SIZE_DESC) { - val = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL); + szdesc = brcmf_chip_dmp_get_desc(ci, eromaddr, NULL); /* skip upper size descriptor if present */ - if (val & DMP_DESC_ADDRSIZE_GT32) + if (szdesc & DMP_DESC_ADDRSIZE_GT32) brcmf_chip_dmp_get_desc(ci, eromaddr, NULL); } - /* only look for 4K register regions */ - if (sztype != DMP_SLAVE_SIZE_4K) + /* look for 4K or 8K register regions */ + if (sztype != DMP_SLAVE_SIZE_4K && + sztype != DMP_SLAVE_SIZE_8K) continue; stype = (val & DMP_SLAVE_TYPE) >> DMP_SLAVE_TYPE_S; @@ -889,7 +897,8 @@ int brcmf_chip_dmp_erom_scan(struct brcmf_chip_priv *ci) /* need core with ports */ if (nmw + nsw == 0 && - id != BCMA_CORE_PMU) + id != BCMA_CORE_PMU && + id != BCMA_CORE_GCI) continue; /* try to obtain register address info */ @@ -1356,6 +1365,16 @@ bool brcmf_chip_sr_capable(struct brcmf_chip *pub) addr = CORE_CC_REG(base, sr_control1); reg = chip->ops->read32(chip->ctx, addr); return reg != 0; + case CY_CC_4373_CHIP_ID: + /* explicitly check SR engine enable bit */ + addr = CORE_CC_REG(base, sr_control0); + reg = chip->ops->read32(chip->ctx, addr); + return (reg & CC_SR_CTL0_ENABLE_MASK) != 0; + case CY_CC_43012_CHIP_ID: + addr = CORE_CC_REG(pmu->base, retention_ctl); + reg = chip->ops->read32(chip->ctx, addr); + return (reg & (PMU_RCTL_MACPHY_DISABLE_MASK | + PMU_RCTL_LOGIC_DISABLE_MASK)) == 0; default: addr = CORE_CC_REG(pmu->base, 
pmucapabilities_ext); reg = chip->ops->read32(chip->ctx, addr); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c index 94044a7a6021..1f1e95a15a17 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.c @@ -214,7 +214,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr, sizeof(ifp->mac_addr)); if (err < 0) { - brcmf_err("Retreiving cur_etheraddr failed, %d\n", err); + brcmf_err("Retrieving cur_etheraddr failed, %d\n", err); goto done; } memcpy(ifp->drvr->wiphy->perm_addr, ifp->drvr->mac, ETH_ALEN); @@ -269,7 +269,7 @@ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp) strcpy(buf, "ver"); err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf)); if (err < 0) { - brcmf_err("Retreiving version information failed, %d\n", + brcmf_err("Retrieving version information failed, %d\n", err); goto done; } @@ -448,7 +448,8 @@ struct brcmf_mp_device *brcmf_get_module_param(struct device *dev, } } if (!found) { - /* No platform data for this device, try OF (Open Firwmare) */ + /* No platform data for this device, try OF and DMI data */ + brcmf_dmi_probe(settings, chip, chiprev); brcmf_of_probe(dev, bus_type, settings); } return settings; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h index a34642cb4d2f..4ce56be90b74 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/common.h @@ -59,6 +59,7 @@ struct brcmf_mp_device { bool iapp; bool ignore_probe_fail; struct brcmfmac_pd_cc *country_codes; + const char *board_type; union { struct brcmfmac_sdio_pd sdio; } bus; @@ -74,4 +75,11 @@ void brcmf_release_module_param(struct brcmf_mp_device *module_param); /* Sets dongle media info (drv_version, mac address). */ int brcmf_c_preinit_dcmds(struct brcmf_if *ifp); +#ifdef CONFIG_DMI +void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev); +#else +static inline void +brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) {} +#endif + #endif /* BRCMFMAC_COMMON_H */ diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c index b1f702faff4f..860a4372cb56 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/core.c @@ -1130,7 +1130,7 @@ int brcmf_attach(struct device *dev, struct brcmf_mp_device *settings) brcmf_dbg(TRACE, "Enter\n"); - ops = brcmf_cfg80211_get_ops(); + ops = brcmf_cfg80211_get_ops(settings); if (!ops) return -ENOMEM; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c new file mode 100644 index 000000000000..51d76ac45075 --- /dev/null +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/dmi.c @@ -0,0 +1,116 @@ +/* + * Copyright 2018 Hans de Goede <hdegoede@redhat.com> + * + * Permission to use, copy, modify, and/or distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY + * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION + * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN + * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include <linux/dmi.h> +#include <linux/mod_devicetable.h> +#include "core.h" +#include "common.h" +#include "brcm_hw_ids.h" + +/* The DMI data never changes so we can use a static buf for this */ +static char dmi_board_type[128]; + +struct brcmf_dmi_data { + u32 chip; + u32 chiprev; + const char *board_type; +}; + +/* NOTE: Please keep all entries sorted alphabetically */ + +static const struct brcmf_dmi_data gpd_win_pocket_data = { + BRCM_CC_4356_CHIP_ID, 2, "gpd-win-pocket" +}; + +static const struct brcmf_dmi_data jumper_ezpad_mini3_data = { + BRCM_CC_43430_CHIP_ID, 0, "jumper-ezpad-mini3" +}; + +static const struct brcmf_dmi_data meegopad_t08_data = { + BRCM_CC_43340_CHIP_ID, 2, "meegopad-t08" +}; + +static const struct dmi_system_id dmi_platform_data[] = { + { + /* Match for the GPDwin which unfortunately uses somewhat + * generic dmi strings, which is why we test for 4 strings. + * Comparing against 23 other byt/cht boards, board_vendor + * and board_name are unique to the GPDwin, whereas only one + * other board has the same board_serial and 3 others have + * the same default product_name. Also the GPDwin is the + * only device to have both board_ and product_name not set. + */ + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"), + DMI_MATCH(DMI_BOARD_NAME, "Default string"), + DMI_MATCH(DMI_BOARD_SERIAL, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), + }, + .driver_data = (void *)&gpd_win_pocket_data, + }, + { + /* Jumper EZpad mini3 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"), + /* jumperx.T87.KFBNEEA02 with the version-nr dropped */ + DMI_MATCH(DMI_BIOS_VERSION, "jumperx.T87.KFBNEEA"), + }, + .driver_data = (void *)&jumper_ezpad_mini3_data, + }, + { + /* Meegopad T08 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "Default string"), + DMI_MATCH(DMI_BOARD_NAME, "T3 MRD"), + DMI_MATCH(DMI_BOARD_VERSION, "V1.1"), + }, + .driver_data = (void *)&meegopad_t08_data, + }, + {} +}; + +void brcmf_dmi_probe(struct brcmf_mp_device *settings, u32 chip, u32 chiprev) +{ + const struct dmi_system_id *match; + const struct brcmf_dmi_data *data; + const char *sys_vendor; + const char *product_name; + + /* Some models have DMI strings which are too generic, e.g. + * "Default string"; we use a quirk table for these.
+ */ + for (match = dmi_first_match(dmi_platform_data); + match; + match = dmi_first_match(match + 1)) { + data = match->driver_data; + + if (data->chip == chip && data->chiprev == chiprev) { + settings->board_type = data->board_type; + return; + } + } + + /* Not found in the quirk-table, use sys_vendor-product_name */ + sys_vendor = dmi_get_system_info(DMI_SYS_VENDOR); + product_name = dmi_get_system_info(DMI_PRODUCT_NAME); + if (sys_vendor && product_name) { + snprintf(dmi_board_type, sizeof(dmi_board_type), "%s-%s", + sys_vendor, product_name); + settings->board_type = dmi_board_type; + } +} diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c index 9095b830ae4d..14b948917a1a 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.c @@ -14,6 +14,7 @@ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ +#include <linux/efi.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/device.h> @@ -445,6 +446,75 @@ struct brcmf_fw { static void brcmf_fw_request_done(const struct firmware *fw, void *ctx); +#ifdef CONFIG_EFI +/* In some cases the EFI-var stored nvram contains "ccode=ALL" or "ccode=XV" + * to specify "worldwide" compatible settings, but these two ccodes do not work + * properly. "ccode=ALL" causes channels 12 and 13 to not be available, + * "ccode=XV" causes all 5GHz channels to not be available. So we replace both + * with "ccode=X2" which allows channels 12+13 and 5GHz channels in + * no-Initiate-Radiation mode. This means that we will never send on these + * channels without first having received valid wifi traffic on the channel. + */ +static void brcmf_fw_fix_efi_nvram_ccode(char *data, unsigned long data_len) +{ + char *ccode; + + ccode = strnstr((char *)data, "ccode=ALL", data_len); + if (!ccode) + ccode = strnstr((char *)data, "ccode=XV\r", data_len); + if (!ccode) + return; + + ccode[6] = 'X'; + ccode[7] = '2'; + ccode[8] = '\r'; +} + +static u8 *brcmf_fw_nvram_from_efi(size_t *data_len_ret) +{ + const u16 name[] = { 'n', 'v', 'r', 'a', 'm', 0 }; + struct efivar_entry *nvram_efivar; + unsigned long data_len = 0; + u8 *data = NULL; + int err; + + nvram_efivar = kzalloc(sizeof(*nvram_efivar), GFP_KERNEL); + if (!nvram_efivar) + return NULL; + + memcpy(&nvram_efivar->var.VariableName, name, sizeof(name)); + nvram_efivar->var.VendorGuid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, + 0xb5, 0x1f, 0x43, 0x26, + 0x81, 0x23, 0xd1, 0x13); + + err = efivar_entry_size(nvram_efivar, &data_len); + if (err) + goto fail; + + data = kmalloc(data_len, GFP_KERNEL); + if (!data) + goto fail; + + err = efivar_entry_get(nvram_efivar, NULL, &data_len, data); + if (err) + goto fail; + + brcmf_fw_fix_efi_nvram_ccode(data, data_len); + brcmf_info("Using nvram EFI variable\n"); + + kfree(nvram_efivar); + *data_len_ret = data_len; + return data; + +fail: + kfree(data); + kfree(nvram_efivar); + return NULL; +} +#else +static inline u8 *brcmf_fw_nvram_from_efi(size_t *data_len) { return NULL; } +#endif + static void brcmf_fw_free_request(struct brcmf_fw_request *req) { struct brcmf_fw_item *item; @@ -463,11 +533,12 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) { struct brcmf_fw *fwctx = ctx; struct brcmf_fw_item *cur; + bool free_bcm47xx_nvram = false; + bool kfree_nvram = false; u32 nvram_length = 0; void *nvram = NULL; u8 *data = NULL; size_t data_len; - bool raw_nvram; brcmf_dbg(TRACE,
"enter: dev=%s\n", dev_name(fwctx->dev)); @@ -476,12 +547,13 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) if (fw && fw->data) { data = (u8 *)fw->data; data_len = fw->size; - raw_nvram = false; } else { - data = bcm47xx_nvram_get_contents(&data_len); - if (!data && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) + if ((data = bcm47xx_nvram_get_contents(&data_len))) + free_bcm47xx_nvram = true; + else if ((data = brcmf_fw_nvram_from_efi(&data_len))) + kfree_nvram = true; + else if (!(cur->flags & BRCMF_FW_REQF_OPTIONAL)) goto fail; - raw_nvram = true; } if (data) @@ -489,8 +561,11 @@ static int brcmf_fw_request_nvram_done(const struct firmware *fw, void *ctx) fwctx->req->domain_nr, fwctx->req->bus_nr); - if (raw_nvram) + if (free_bcm47xx_nvram) bcm47xx_nvram_release_contents(data); + if (kfree_nvram) + kfree(data); + release_firmware(fw); if (!nvram && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) goto fail; @@ -504,90 +579,75 @@ fail: return -ENOENT; } -static int brcmf_fw_request_next_item(struct brcmf_fw *fwctx, bool async) +static int brcmf_fw_complete_request(const struct firmware *fw, + struct brcmf_fw *fwctx) { - struct brcmf_fw_item *cur; - const struct firmware *fw = NULL; - int ret; - - cur = &fwctx->req->items[fwctx->curpos]; - - brcmf_dbg(TRACE, "%srequest for %s\n", async ? "async " : "", - cur->path); - - if (async) - ret = request_firmware_nowait(THIS_MODULE, true, cur->path, - fwctx->dev, GFP_KERNEL, fwctx, - brcmf_fw_request_done); - else - ret = request_firmware(&fw, cur->path, fwctx->dev); - - if (ret < 0) { - brcmf_fw_request_done(NULL, fwctx); - } else if (!async && fw) { - brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, - fw ? "" : "not "); - if (cur->type == BRCMF_FW_TYPE_BINARY) - cur->binary = fw; - else if (cur->type == BRCMF_FW_TYPE_NVRAM) - brcmf_fw_request_nvram_done(fw, fwctx); - else - release_firmware(fw); - - return -EAGAIN; - } - return 0; -} - -static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) -{ - struct brcmf_fw *fwctx = ctx; - struct brcmf_fw_item *cur; + struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos]; int ret = 0; - cur = &fwctx->req->items[fwctx->curpos]; - - brcmf_dbg(TRACE, "enter: firmware %s %sfound\n", cur->path, - fw ? "" : "not "); - - if (!fw) - ret = -ENOENT; + brcmf_dbg(TRACE, "firmware %s %sfound\n", cur->path, fw ? "" : "not "); switch (cur->type) { case BRCMF_FW_TYPE_NVRAM: ret = brcmf_fw_request_nvram_done(fw, fwctx); break; case BRCMF_FW_TYPE_BINARY: - cur->binary = fw; + if (fw) + cur->binary = fw; + else + ret = -ENOENT; break; default: /* something fishy here so bail out early */ brcmf_err("unknown fw type: %d\n", cur->type); release_firmware(fw); ret = -EINVAL; - goto fail; } - if (ret < 0 && !(cur->flags & BRCMF_FW_REQF_OPTIONAL)) - goto fail; + return (cur->flags & BRCMF_FW_REQF_OPTIONAL) ? 
0 : ret; +} - do { - if (++fwctx->curpos == fwctx->req->n_items) { - ret = 0; - goto done; - } +static int brcmf_fw_request_firmware(const struct firmware **fw, + struct brcmf_fw *fwctx) +{ + struct brcmf_fw_item *cur = &fwctx->req->items[fwctx->curpos]; + int ret; - ret = brcmf_fw_request_next_item(fwctx, false); - } while (ret == -EAGAIN); + /* nvram files are board-specific, first try a board-specific path */ + if (cur->type == BRCMF_FW_TYPE_NVRAM && fwctx->req->board_type) { + char alt_path[BRCMF_FW_NAME_LEN]; - return; + strlcpy(alt_path, cur->path, BRCMF_FW_NAME_LEN); + /* strip .txt at the end */ + alt_path[strlen(alt_path) - 4] = 0; + strlcat(alt_path, ".", BRCMF_FW_NAME_LEN); + strlcat(alt_path, fwctx->req->board_type, BRCMF_FW_NAME_LEN); + strlcat(alt_path, ".txt", BRCMF_FW_NAME_LEN); -fail: - brcmf_dbg(TRACE, "failed err=%d: dev=%s, fw=%s\n", ret, - dev_name(fwctx->dev), cur->path); - brcmf_fw_free_request(fwctx->req); - fwctx->req = NULL; -done: + ret = request_firmware(fw, alt_path, fwctx->dev); + if (ret == 0) + return ret; + } + + return request_firmware(fw, cur->path, fwctx->dev); +} + +static void brcmf_fw_request_done(const struct firmware *fw, void *ctx) +{ + struct brcmf_fw *fwctx = ctx; + int ret; + + ret = brcmf_fw_complete_request(fw, fwctx); + + while (ret == 0 && ++fwctx->curpos < fwctx->req->n_items) { + brcmf_fw_request_firmware(&fw, fwctx); + ret = brcmf_fw_complete_request(fw, ctx); + } + + if (ret) { + brcmf_fw_free_request(fwctx->req); + fwctx->req = NULL; + } fwctx->done(fwctx->dev, ret, fwctx->req); kfree(fwctx); } @@ -611,7 +671,9 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, void (*fw_cb)(struct device *dev, int err, struct brcmf_fw_request *req)) { + struct brcmf_fw_item *first = &req->items[0]; struct brcmf_fw *fwctx; + int ret; brcmf_dbg(TRACE, "enter: dev=%s\n", dev_name(dev)); if (!fw_cb) @@ -628,7 +690,12 @@ int brcmf_fw_get_firmwares(struct device *dev, struct brcmf_fw_request *req, fwctx->req = req; fwctx->done = fw_cb; - brcmf_fw_request_next_item(fwctx, true); + ret = request_firmware_nowait(THIS_MODULE, true, first->path, + fwctx->dev, GFP_KERNEL, fwctx, + brcmf_fw_request_done); + if (ret < 0) + brcmf_fw_request_done(NULL, fwctx); + return 0; } @@ -641,8 +708,9 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, struct brcmf_fw_request *fwreq; char chipname[12]; const char *mp_path; + size_t mp_path_len; u32 i, j; - char end; + char end = '\0'; size_t reqsz; for (i = 0; i < table_size; i++) { @@ -667,7 +735,10 @@ brcmf_fw_alloc_request(u32 chip, u32 chiprev, mapping_table[i].fw_base, chipname); mp_path = brcmf_mp_global.firmware_path; - end = mp_path[strlen(mp_path) - 1]; + mp_path_len = strnlen(mp_path, BRCMF_FW_ALTPATH_LEN); + if (mp_path_len) + end = mp_path[mp_path_len - 1]; + fwreq->n_items = n_fwnames; for (j = 0; j < n_fwnames; j++) { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h index 2893e56910f0..a0834be8864e 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/firmware.h @@ -70,6 +70,7 @@ struct brcmf_fw_request { u16 domain_nr; u16 bus_nr; u32 n_items; + const char *board_type; struct brcmf_fw_item items[0]; }; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h index 63b1287e2e6d..b6b183b18413 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h +++ 
b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil.h @@ -80,6 +80,7 @@ #define BRCMF_C_SCB_DEAUTHENTICATE_FOR_REASON 201 #define BRCMF_C_SET_ASSOC_PREFER 205 #define BRCMF_C_GET_VALID_CHANNELS 217 +#define BRCMF_C_SET_FAKEFRAG 219 #define BRCMF_C_GET_KEY_PRIMARY 235 #define BRCMF_C_SET_KEY_PRIMARY 236 #define BRCMF_C_SET_SCAN_PASSIVE_TIME 258 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h index d5bb81e88762..39ac1bbb6cc0 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwil_types.h @@ -176,6 +176,8 @@ #define BRCMF_VHT_CAP_MCS_MAP_NSS_MAX 8 +#define BRCMF_HE_CAP_MCS_MAP_NSS_MAX 8 + /* MAX_CHUNK_LEN is the maximum length for data passing to firmware in each * ioctl. It is relatively small because firmware has small maximum size input * playload restriction for ioctls. @@ -601,13 +603,37 @@ struct brcmf_sta_info_le { __le32 rx_pkts_retried; /* # rx with retry bit set */ __le32 tx_rate_fallback; /* lowest fallback TX rate */ - /* Fields valid for ver >= 5 */ - struct { - __le32 count; /* # rates in this set */ - u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ - u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */ - __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ - } rateset_adv; + union { + struct { + struct { + __le32 count; /* # rates in this set */ + u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ + u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */ + __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ + } rateset_adv; + } v5; + + struct { + __le32 rx_dur_total; /* total user RX duration (estimated) */ + __le16 chanspec; /** chanspec this sta is on */ + __le16 pad_1; + struct { + __le16 version; /* version */ + __le16 len; /* length */ + __le32 count; /* # rates in this set */ + u8 rates[BRCMF_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ + u8 mcs[BRCMF_MCSSET_LEN]; /* supported mcs index bit map */ + __le16 vht_mcs[BRCMF_VHT_CAP_MCS_MAP_NSS_MAX]; /* supported mcs index bit map per nss */ + __le16 he_mcs[BRCMF_HE_CAP_MCS_MAP_NSS_MAX]; /* supported he mcs index bit map per nss */ + } rateset_adv; /* rateset along with mcs index bitmap */ + __le16 wpauth; /* authentication type */ + u8 algo; /* crypto algorithm */ + u8 pad_2; + __le32 tx_rspec; /* Rate of last successful tx frame */ + __le32 rx_rspec; /* Rate of last successful rx frame */ + __le32 wnm_cap; /* wnm capabilities */ + } v7; + }; }; struct brcmf_chanspec_list { diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c index f3cbf78c8899..02759ebd207c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fwsignal.c @@ -511,6 +511,7 @@ struct brcmf_fws_info { struct work_struct fws_dequeue_work; u32 fifo_enqpkt[BRCMF_FWS_FIFO_COUNT]; int fifo_credit[BRCMF_FWS_FIFO_COUNT]; + int init_fifo_credit[BRCMF_FWS_FIFO_COUNT]; int credits_borrowed[BRCMF_FWS_FIFO_AC_VO + 1]; int deq_node_pos[BRCMF_FWS_FIFO_COUNT]; u32 fifo_credit_map; @@ -1237,6 +1238,9 @@ static void brcmf_fws_return_credits(struct brcmf_fws_info *fws, } fws->fifo_credit[fifo] += credits; + if (fws->fifo_credit[fifo] > fws->init_fifo_credit[fifo]) + 
fws->fifo_credit[fifo] = fws->init_fifo_credit[fifo]; + } static void brcmf_fws_schedule_deq(struct brcmf_fws_info *fws) @@ -1451,9 +1455,10 @@ static int brcmf_fws_txstatus_suppressed(struct brcmf_fws_info *fws, int fifo, static int brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, - u32 genbit, u16 seq) + u32 genbit, u16 seq, u8 compcnt) { u32 fifo; + u8 cnt = 0; int ret; bool remove_from_hanger = true; struct sk_buff *skb; @@ -1464,60 +1469,71 @@ brcmf_fws_txs_process(struct brcmf_fws_info *fws, u8 flags, u32 hslot, brcmf_dbg(DATA, "flags %d\n", flags); if (flags == BRCMF_FWS_TXSTATUS_DISCARD) - fws->stats.txs_discard++; + fws->stats.txs_discard += compcnt; else if (flags == BRCMF_FWS_TXSTATUS_CORE_SUPPRESS) { - fws->stats.txs_supp_core++; + fws->stats.txs_supp_core += compcnt; remove_from_hanger = false; } else if (flags == BRCMF_FWS_TXSTATUS_FW_PS_SUPPRESS) { - fws->stats.txs_supp_ps++; + fws->stats.txs_supp_ps += compcnt; remove_from_hanger = false; } else if (flags == BRCMF_FWS_TXSTATUS_FW_TOSSED) - fws->stats.txs_tossed++; + fws->stats.txs_tossed += compcnt; else if (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) - fws->stats.txs_host_tossed++; + fws->stats.txs_host_tossed += compcnt; else brcmf_err("unexpected txstatus\n"); - ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, - remove_from_hanger); - if (ret != 0) { - brcmf_err("no packet in hanger slot: hslot=%d\n", hslot); - return ret; - } + while (cnt < compcnt) { + ret = brcmf_fws_hanger_poppkt(&fws->hanger, hslot, &skb, + remove_from_hanger); + if (ret != 0) { + brcmf_err("no packet in hanger slot: hslot=%d\n", + hslot); + goto cont; + } - skcb = brcmf_skbcb(skb); - entry = skcb->mac; - if (WARN_ON(!entry)) { - brcmu_pkt_buf_free_skb(skb); - return -EINVAL; - } - entry->transit_count--; - if (entry->suppressed && entry->suppr_transit_count) - entry->suppr_transit_count--; + skcb = brcmf_skbcb(skb); + entry = skcb->mac; + if (WARN_ON(!entry)) { + brcmu_pkt_buf_free_skb(skb); + goto cont; + } + entry->transit_count--; + if (entry->suppressed && entry->suppr_transit_count) + entry->suppr_transit_count--; - brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, flags, - skcb->htod, seq); + brcmf_dbg(DATA, "%s flags %d htod %X seq %X\n", entry->name, + flags, skcb->htod, seq); - /* pick up the implicit credit from this packet */ - fifo = brcmf_skb_htod_tag_get_field(skb, FIFO); - if ((fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT) || - (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) || - (flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED)) { - brcmf_fws_return_credits(fws, fifo, 1); - brcmf_fws_schedule_deq(fws); - } - brcmf_fws_macdesc_return_req_credit(skb); + /* pick up the implicit credit from this packet */ + fifo = brcmf_skb_htod_tag_get_field(skb, FIFO); + if (fws->fcmode == BRCMF_FWS_FCMODE_IMPLIED_CREDIT || + (brcmf_skb_if_flags_get_field(skb, REQ_CREDIT)) || + flags == BRCMF_FWS_TXSTATUS_HOST_TOSSED) { + brcmf_fws_return_credits(fws, fifo, 1); + brcmf_fws_schedule_deq(fws); + } + brcmf_fws_macdesc_return_req_credit(skb); - ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp); - if (ret) { - brcmu_pkt_buf_free_skb(skb); - return -EINVAL; + ret = brcmf_proto_hdrpull(fws->drvr, false, skb, &ifp); + if (ret) { + brcmu_pkt_buf_free_skb(skb); + goto cont; + } + if (!remove_from_hanger) + ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, + genbit, seq); + if (remove_from_hanger || ret) + brcmf_txfinalize(ifp, skb, true); + +cont: + hslot = (hslot + 1) & (BRCMF_FWS_TXSTAT_HSLOT_MASK >> + 
BRCMF_FWS_TXSTAT_HSLOT_SHIFT); + if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) + seq = (seq + 1) & BRCMF_SKB_HTOD_SEQ_NR_MASK; + + cnt++; } - if (!remove_from_hanger) - ret = brcmf_fws_txstatus_suppressed(fws, fifo, skb, - genbit, seq); - if (remove_from_hanger || ret) - brcmf_txfinalize(ifp, skb, true); return 0; } @@ -1543,7 +1559,8 @@ static int brcmf_fws_fifocreditback_indicate(struct brcmf_fws_info *fws, return BRCMF_FWS_RET_OK_SCHEDULE; } -static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) +static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 type, + u8 *data) { __le32 status_le; __le16 seq_le; @@ -1552,23 +1569,31 @@ static int brcmf_fws_txstatus_indicate(struct brcmf_fws_info *fws, u8 *data) u32 genbit; u8 flags; u16 seq; + u8 compcnt; + u8 compcnt_offset = BRCMF_FWS_TYPE_TXSTATUS_LEN; - fws->stats.txs_indicate++; memcpy(&status_le, data, sizeof(status_le)); status = le32_to_cpu(status_le); flags = brcmf_txstatus_get_field(status, FLAGS); hslot = brcmf_txstatus_get_field(status, HSLOT); genbit = brcmf_txstatus_get_field(status, GENERATION); if (BRCMF_FWS_MODE_GET_REUSESEQ(fws->mode)) { - memcpy(&seq_le, &data[BRCMF_FWS_TYPE_PKTTAG_LEN], + memcpy(&seq_le, &data[BRCMF_FWS_TYPE_TXSTATUS_LEN], sizeof(seq_le)); seq = le16_to_cpu(seq_le); + compcnt_offset += BRCMF_FWS_TYPE_SEQ_LEN; } else { seq = 0; } + if (type == BRCMF_FWS_TYPE_COMP_TXSTATUS) + compcnt = data[compcnt_offset]; + else + compcnt = 1; + fws->stats.txs_indicate += compcnt; + brcmf_fws_lock(fws); - brcmf_fws_txs_process(fws, flags, hslot, genbit, seq); + brcmf_fws_txs_process(fws, flags, hslot, genbit, seq, compcnt); brcmf_fws_unlock(fws); return BRCMF_FWS_RET_OK_NOSCHEDULE; } @@ -1595,19 +1620,21 @@ static int brcmf_fws_notify_credit_map(struct brcmf_if *ifp, brcmf_err("event payload too small (%d)\n", e->datalen); return -EINVAL; } - if (fws->creditmap_received) - return 0; fws->creditmap_received = true; brcmf_dbg(TRACE, "enter: credits %pM\n", credits); brcmf_fws_lock(fws); for (i = 0; i < ARRAY_SIZE(fws->fifo_credit); i++) { - if (*credits) + fws->fifo_credit[i] += credits[i] - fws->init_fifo_credit[i]; + fws->init_fifo_credit[i] = credits[i]; + if (fws->fifo_credit[i] > 0) fws->fifo_credit_map |= 1 << i; else fws->fifo_credit_map &= ~(1 << i); - fws->fifo_credit[i] = *credits++; + WARN_ONCE(fws->fifo_credit[i] < 0, + "fifo_credit[%d] is negative(%d)\n", i, + fws->fifo_credit[i]); } brcmf_fws_schedule_deq(fws); brcmf_fws_unlock(fws); @@ -1882,8 +1909,6 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb) err = BRCMF_FWS_RET_OK_NOSCHEDULE; switch (type) { - case BRCMF_FWS_TYPE_COMP_TXSTATUS: - break; case BRCMF_FWS_TYPE_HOST_REORDER_RXPKTS: rd = (struct brcmf_skb_reorder_data *)skb->cb; rd->reorder = data; @@ -1906,7 +1931,8 @@ void brcmf_fws_hdrpull(struct brcmf_if *ifp, s16 siglen, struct sk_buff *skb) err = brcmf_fws_request_indicate(fws, type, data); break; case BRCMF_FWS_TYPE_TXSTATUS: - brcmf_fws_txstatus_indicate(fws, data); + case BRCMF_FWS_TYPE_COMP_TXSTATUS: + brcmf_fws_txstatus_indicate(fws, type, data); break; case BRCMF_FWS_TYPE_FIFO_CREDITBACK: err = brcmf_fws_fifocreditback_indicate(fws, data); @@ -1995,7 +2021,7 @@ static void brcmf_fws_rollback_toq(struct brcmf_fws_info *fws, fws->stats.rollback_failed++; hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, - hslot, 0, 0); + hslot, 0, 0, 1); } else { fws->stats.rollback_success++; brcmf_fws_return_credits(fws, fifo, 1); @@ -2013,7 
+2039,7 @@ static int brcmf_fws_borrow_credit(struct brcmf_fws_info *fws) } for (lender_ac = 0; lender_ac <= BRCMF_FWS_FIFO_AC_VO; lender_ac++) { - if (fws->fifo_credit[lender_ac]) { + if (fws->fifo_credit[lender_ac] > 0) { fws->credits_borrowed[lender_ac]++; fws->fifo_credit[lender_ac]--; if (fws->fifo_credit[lender_ac] == 0) @@ -2210,8 +2236,9 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) } continue; } - while ((fws->fifo_credit[fifo]) || ((!fws->bcmc_credit_check) && - (fifo == BRCMF_FWS_FIFO_BCMC))) { + while ((fws->fifo_credit[fifo] > 0) || + ((!fws->bcmc_credit_check) && + (fifo == BRCMF_FWS_FIFO_BCMC))) { skb = brcmf_fws_deq(fws, fifo); if (!skb) break; @@ -2222,7 +2249,7 @@ static void brcmf_fws_dequeue_worker(struct work_struct *worker) break; } if ((fifo == BRCMF_FWS_FIFO_AC_BE) && - (fws->fifo_credit[fifo] == 0) && + (fws->fifo_credit[fifo] <= 0) && (!fws->bus_flow_blocked)) { while (brcmf_fws_borrow_credit(fws) == 0) { skb = brcmf_fws_deq(fws, fifo); @@ -2455,7 +2482,8 @@ void brcmf_fws_bustxfail(struct brcmf_fws_info *fws, struct sk_buff *skb) } brcmf_fws_lock(fws); hslot = brcmf_skb_htod_tag_get_field(skb, HSLOT); - brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0); + brcmf_fws_txs_process(fws, BRCMF_FWS_TXSTATUS_HOST_TOSSED, hslot, 0, 0, + 1); brcmf_fws_unlock(fws); } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c index aee6e5937c41..84e3373289eb 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/of.c @@ -27,11 +27,20 @@ void brcmf_of_probe(struct device *dev, enum brcmf_bus_type bus_type, struct brcmf_mp_device *settings) { struct brcmfmac_sdio_pd *sdio = &settings->bus.sdio; - struct device_node *np = dev->of_node; + struct device_node *root, *np = dev->of_node; + struct property *prop; int irq; u32 irqf; u32 val; + /* Set board-type to the first string of the machine compatible prop */ + root = of_find_node_by_path("/"); + if (root) { + prop = of_find_property(root, "compatible", NULL); + settings->board_type = of_prop_next_string(prop, NULL); + of_node_put(root); + } + if (!np || bus_type != BRCMF_BUSTYPE_SDIO || !of_device_is_compatible(np, "brcm,bcm4329-fmac")) return; diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c index 5dea569d63ed..16d7dda965d8 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c @@ -1785,6 +1785,7 @@ brcmf_pcie_prepare_fw_request(struct brcmf_pciedev_info *devinfo) fwreq->items[BRCMF_PCIE_FW_CODE].type = BRCMF_FW_TYPE_BINARY; fwreq->items[BRCMF_PCIE_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; fwreq->items[BRCMF_PCIE_FW_NVRAM].flags = BRCMF_FW_REQF_OPTIONAL; + fwreq->board_type = devinfo->settings->board_type; /* NVRAM reserves PCI domain 0 for Broadcom's SDK faked bus */ fwreq->domain_nr = pci_domain_nr(devinfo->pdev->bus) + 1; fwreq->bus_nr = devinfo->pdev->bus->number; @@ -2018,6 +2019,7 @@ static const struct dev_pm_ops brcmf_pciedrvr_pm = { static const struct pci_device_id brcmf_pcie_devid_table[] = { BRCMF_PCIE_DEVICE(BRCM_PCIE_4350_DEVICE_ID), BRCMF_PCIE_DEVICE_SUB(0x4355, BRCM_PCIE_VENDOR_ID_BROADCOM, 0x4355), + BRCMF_PCIE_DEVICE(BRCM_PCIE_4354_RAW_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_4356_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43567_DEVICE_ID), BRCMF_PCIE_DEVICE(BRCM_PCIE_43570_DEVICE_ID), diff 
--git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index b2e1ab5adb64..0cd5b8d970d7 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -49,6 +49,11 @@ #define DCMD_RESP_TIMEOUT msecs_to_jiffies(2500) #define CTL_DONE_TIMEOUT msecs_to_jiffies(2500) +/* watermark expressed in number of words */ +#define DEFAULT_F2_WATERMARK 0x8 +#define CY_4373_F2_WATERMARK 0x40 +#define CY_43012_F2_WATERMARK 0x60 + #ifdef DEBUG #define BRCMF_TRAP_INFO_SIZE 80 @@ -138,6 +143,8 @@ struct rte_console { /* 1: isolate internal sdio signals, put external pads in tri-state; requires * sdio bus power cycle to clear (rev 9) */ #define SBSDIO_DEVCTL_PADS_ISO 0x08 +/* 1: enable F2 Watermark */ +#define SBSDIO_DEVCTL_F2WM_ENAB 0x10 /* Force SD->SB reset mapping (rev 11) */ #define SBSDIO_DEVCTL_SB_RST_CTL 0x30 /* Determined by CoreControl bit */ @@ -618,6 +625,7 @@ BRCMF_FW_DEF(43455, "brcmfmac43455-sdio"); BRCMF_FW_DEF(4354, "brcmfmac4354-sdio"); BRCMF_FW_DEF(4356, "brcmfmac4356-sdio"); BRCMF_FW_DEF(4373, "brcmfmac4373-sdio"); +BRCMF_FW_DEF(43012, "brcmfmac43012-sdio"); static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_43143_CHIP_ID, 0xFFFFFFFF, 43143), @@ -637,7 +645,8 @@ static const struct brcmf_firmware_mapping brcmf_sdio_fwnames[] = { BRCMF_FW_ENTRY(BRCM_CC_4345_CHIP_ID, 0xFFFFFFC0, 43455), BRCMF_FW_ENTRY(BRCM_CC_4354_CHIP_ID, 0xFFFFFFFF, 4354), BRCMF_FW_ENTRY(BRCM_CC_4356_CHIP_ID, 0xFFFFFFFF, 4356), - BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373) + BRCMF_FW_ENTRY(CY_CC_4373_CHIP_ID, 0xFFFFFFFF, 4373), + BRCMF_FW_ENTRY(CY_CC_43012_CHIP_ID, 0xFFFFFFFF, 43012) }; static void pkt_align(struct sk_buff *p, int len, int align) @@ -671,6 +680,14 @@ brcmf_sdio_kso_control(struct brcmf_sdio *bus, bool on) /* 1st KSO write goes to AOS wake up core if device is asleep */ brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_SLEEPCSR, wr_val, &err); + /* The 43012 chip could go down immediately after the KSO bit is + * cleared, so further reads of the KSO register could fail. Bail + * out right after clearing the KSO bit to avoid polling it. + */ + if (!on && bus->ci->chip == CY_CC_43012_CHIP_ID) + return err; + if (on) { /* device WAKEUP through KSO: * write bit 0 & read back until @@ -2396,6 +2413,14 @@ static int brcmf_sdio_tx_ctrlframe(struct brcmf_sdio *bus, u8 *frame, u16 len) return ret; } +static bool brcmf_chip_is_ulp(struct brcmf_chip *ci) +{ + if (ci->chip == CY_CC_43012_CHIP_ID) + return true; + else + return false; +} + static void brcmf_sdio_bus_stop(struct device *dev) { struct brcmf_bus *bus_if = dev_get_drvdata(dev); @@ -2403,7 +2428,7 @@ static void brcmf_sdio_bus_stop(struct device *dev) struct brcmf_sdio *bus = sdiodev->bus; struct brcmf_core *core = bus->sdio_core; u32 local_hostintmask; - u8 saveclk; + u8 saveclk, bpreq; int err; brcmf_dbg(TRACE, "Enter\n"); @@ -2430,9 +2455,14 @@ static void brcmf_sdio_bus_stop(struct device *dev) /* Force backplane clocks to assure F2 interrupt propagates */ saveclk = brcmf_sdiod_readb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, &err); - if (!err) - brcmf_sdiod_writeb(sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); + if (!err) { + bpreq = saveclk; + bpreq |= brcmf_chip_is_ulp(bus->ci) ?
+ SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT; + brcmf_sdiod_writeb(sdiodev, + SBSDIO_FUNC1_CHIPCLKCSR, + bpreq, &err); + } if (err) brcmf_err("Failed to force clock for F2: err %d\n", err); @@ -3322,20 +3352,49 @@ err: return bcmerror; } +static bool brcmf_sdio_aos_no_decode(struct brcmf_sdio *bus) +{ + if (bus->ci->chip == CY_CC_43012_CHIP_ID || + bus->ci->chip == CY_CC_4373_CHIP_ID || + bus->ci->chip == BRCM_CC_4339_CHIP_ID || + bus->ci->chip == BRCM_CC_4345_CHIP_ID || + bus->ci->chip == BRCM_CC_4354_CHIP_ID) + return true; + else + return false; +} + static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) { int err = 0; u8 val; + u8 wakeupctrl; + u8 cardcap; + u8 chipclkcsr; brcmf_dbg(TRACE, "Enter\n"); + if (brcmf_chip_is_ulp(bus->ci)) { + wakeupctrl = SBSDIO_FUNC1_WCTRL_ALPWAIT_SHIFT; + chipclkcsr = SBSDIO_HT_AVAIL_REQ; + } else { + wakeupctrl = SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + chipclkcsr = SBSDIO_FORCE_HT; + } + + if (brcmf_sdio_aos_no_decode(bus)) { + cardcap = SDIO_CCCR_BRCM_CARDCAP_CMD_NODEC; + } else { + cardcap = (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | + SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT); + } + val = brcmf_sdiod_readb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, &err); if (err) { brcmf_err("error reading SBSDIO_FUNC1_WAKEUPCTRL\n"); return; } - - val |= 1 << SBSDIO_FUNC1_WCTRL_HTWAIT_SHIFT; + val |= 1 << wakeupctrl; brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_WAKEUPCTRL, val, &err); if (err) { brcmf_err("error writing SBSDIO_FUNC1_WAKEUPCTRL\n"); @@ -3344,8 +3403,7 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) /* Add CMD14 Support */ brcmf_sdiod_func0_wb(bus->sdiodev, SDIO_CCCR_BRCM_CARDCAP, - (SDIO_CCCR_BRCM_CARDCAP_CMD14_SUPPORT | - SDIO_CCCR_BRCM_CARDCAP_CMD14_EXT), + cardcap, &err); if (err) { brcmf_err("error writing SDIO_CCCR_BRCM_CARDCAP\n"); @@ -3353,7 +3411,7 @@ static void brcmf_sdio_sr_init(struct brcmf_sdio *bus) } brcmf_sdiod_writeb(bus->sdiodev, SBSDIO_FUNC1_CHIPCLKCSR, - SBSDIO_FORCE_HT, &err); + chipclkcsr, &err); if (err) { brcmf_err("error writing SBSDIO_FUNC1_CHIPCLKCSR\n"); return; @@ -4045,7 +4103,8 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, const struct firmware *code; void *nvram; u32 nvram_len; - u8 saveclk; + u8 saveclk, bpreq; + u8 devctl; brcmf_dbg(TRACE, "Enter: dev=%s, err=%d\n", dev_name(dev), err); @@ -4078,8 +4137,11 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, /* Force clocks on backplane to be sure F2 interrupt propagates */ saveclk = brcmf_sdiod_readb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, &err); if (!err) { + bpreq = saveclk; + bpreq |= brcmf_chip_is_ulp(bus->ci) ? 
+ SBSDIO_HT_AVAIL_REQ : SBSDIO_FORCE_HT; brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_CHIPCLKCSR, - (saveclk | SBSDIO_FORCE_HT), &err); + bpreq, &err); } if (err) { brcmf_err("Failed to force clock for F2: err %d\n", err); @@ -4101,8 +4163,37 @@ static void brcmf_sdio_firmware_callback(struct device *dev, int err, brcmf_sdiod_writel(sdiod, core->base + SD_REG(hostintmask), bus->hostintmask, NULL); - - brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, 8, &err); + switch (sdiod->func1->device) { + case SDIO_DEVICE_ID_CYPRESS_4373: + brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", + CY_4373_F2_WATERMARK); + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + CY_4373_F2_WATERMARK, &err); + devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + brcmf_sdiod_writeb(sdiod, SBSDIO_FUNC1_MESBUSYCTRL, + CY_4373_F2_WATERMARK | + SBSDIO_MESBUSYCTRL_ENAB, &err); + break; + case SDIO_DEVICE_ID_CYPRESS_43012: + brcmf_dbg(INFO, "set F2 watermark to 0x%x*4 bytes\n", + CY_43012_F2_WATERMARK); + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + CY_43012_F2_WATERMARK, &err); + devctl = brcmf_sdiod_readb(sdiod, SBSDIO_DEVICE_CTL, + &err); + devctl |= SBSDIO_DEVCTL_F2WM_ENAB; + brcmf_sdiod_writeb(sdiod, SBSDIO_DEVICE_CTL, devctl, + &err); + break; + default: + brcmf_sdiod_writeb(sdiod, SBSDIO_WATERMARK, + DEFAULT_F2_WATERMARK, &err); + break; + } } else { /* Disable F2 again */ sdio_disable_func(sdiod->func2); @@ -4174,6 +4265,7 @@ brcmf_sdio_prepare_fw_request(struct brcmf_sdio *bus) fwreq->items[BRCMF_SDIO_FW_CODE].type = BRCMF_FW_TYPE_BINARY; fwreq->items[BRCMF_SDIO_FW_NVRAM].type = BRCMF_FW_TYPE_NVRAM; + fwreq->board_type = bus->sdiodev->settings->board_type; return fwreq; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h index 7faed831f07d..34b031154da9 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.h @@ -77,7 +77,7 @@ #define SBSDIO_GPIO_OUT 0x10006 /* gpio enable */ #define SBSDIO_GPIO_EN 0x10007 -/* rev < 7, watermark for sdio device */ +/* rev < 7, watermark for sdio device TX path */ #define SBSDIO_WATERMARK 0x10008 /* control busy signal generation */ #define SBSDIO_DEVICE_CTL 0x10009 @@ -104,6 +104,13 @@ #define SBSDIO_FUNC1_RFRAMEBCHI 0x1001C /* MesBusyCtl (rev 11) */ #define SBSDIO_FUNC1_MESBUSYCTRL 0x1001D +/* Watermark for sdio device RX path */ +#define SBSDIO_MESBUSY_RXFIFO_WM_MASK 0x7F +#define SBSDIO_MESBUSY_RXFIFO_WM_SHIFT 0 +/* Enable busy capability for MES access */ +#define SBSDIO_MESBUSYCTRL_ENAB 0x80 +#define SBSDIO_MESBUSYCTRL_ENAB_SHIFT 7 + /* Sdio Core Rev 12 */ #define SBSDIO_FUNC1_WAKEUPCTRL 0x1001E #define SBSDIO_FUNC1_WCTRL_ALPWAIT_MASK 0x1 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c index 81ff558046a8..6188275b17e5 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/mac80211_if.c @@ -846,8 +846,8 @@ brcms_ops_ampdu_action(struct ieee80211_hw *hw, status = brcms_c_aggregatable(wl->wlc, tid); spin_unlock_bh(&wl->lock); if (!status) { - brcms_err(wl->wlc->hw->d11core, - "START: tid %d is not agg\'able\n", tid); + brcms_dbg_ht(wl->wlc->hw->d11core, + "START: tid %d is not agg\'able\n", tid); return -EINVAL; } ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, 
tid); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h index 4960f7d26804..e9e8337f386c 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_int.h @@ -220,13 +220,6 @@ enum phy_cal_mode { #define BB_MULT_MASK 0x0000ffff #define BB_MULT_VALID_MASK 0x80000000 -#define CORDIC_AG 39797 -#define CORDIC_NI 18 -#define FIXED(X) ((s32)((X) << 16)) - -#define FLOAT(X) \ - (((X) >= 0) ? ((((X) >> 15) + 1) >> 1) : -((((-(X)) >> 15) + 1) >> 1)) - #define PHY_CHAIN_TX_DISABLE_TEMP 115 #define PHY_HYSTERESIS_DELTATEMP 5 diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c index 9fb0d9fbd939..e78a93a45741 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_lcn.c @@ -3447,8 +3447,8 @@ wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val, theta += rot; - i_samp = (u16) (FLOAT(tone_samp.i * max_val) & 0x3ff); - q_samp = (u16) (FLOAT(tone_samp.q * max_val) & 0x3ff); + i_samp = (u16)(CORDIC_FLOAT(tone_samp.i * max_val) & 0x3ff); + q_samp = (u16)(CORDIC_FLOAT(tone_samp.q * max_val) & 0x3ff); data_buf[t] = (i_samp << 10) | q_samp; } diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c index a57f2711f3c0..f4f5e9044152 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmsmac/phy/phy_n.c @@ -23089,8 +23089,8 @@ wlc_phy_gen_load_samples_nphy(struct brcms_phy *pi, u32 f_kHz, u16 max_val, theta += rot; - tone_buf[t].q = (s32) FLOAT(tone_buf[t].q * max_val); - tone_buf[t].i = (s32) FLOAT(tone_buf[t].i * max_val); + tone_buf[t].q = (s32)CORDIC_FLOAT(tone_buf[t].q * max_val); + tone_buf[t].i = (s32)CORDIC_FLOAT(tone_buf[t].i * max_val); } wlc_phy_loadsampletable_nphy(pi, tone_buf, num_samps); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c index eb5db94f5745..8ac34821f1c1 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmutil/d11.c @@ -128,7 +128,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch) } break; default: - WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); break; } @@ -140,7 +140,7 @@ static void brcmu_d11n_decchspec(struct brcmu_chan *ch) ch->band = BRCMU_CHAN_BAND_2G; break; default: - WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); break; } } @@ -167,7 +167,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) ch->sb = BRCMU_CHAN_SB_U; ch->control_ch_num += CH_10MHZ_APART; } else { - WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); } break; case BRCMU_CHSPEC_D11AC_BW_80: @@ -188,7 +188,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) ch->control_ch_num += CH_30MHZ_APART; break; default: - WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); break; } break; @@ -222,13 +222,13 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) ch->control_ch_num += CH_70MHZ_APART; break; default: - WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); break; } break; case BRCMU_CHSPEC_D11AC_BW_8080: default: - 
WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); break; } @@ -240,7 +240,7 @@ static void brcmu_d11ac_decchspec(struct brcmu_chan *ch) ch->band = BRCMU_CHAN_BAND_2G; break; default: - WARN_ON_ONCE(1); + WARN_ONCE(1, "Invalid chanspec 0x%04x\n", ch->chspec); break; } } diff --git a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h index 686f7a85a045..839980da9643 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/brcm_hw_ids.h @@ -60,6 +60,7 @@ #define BRCM_CC_43664_CHIP_ID 43664 #define BRCM_CC_4371_CHIP_ID 0x4371 #define CY_CC_4373_CHIP_ID 0x4373 +#define CY_CC_43012_CHIP_ID 43012 /* USB Device IDs */ #define BRCM_USB_43143_DEVICE_ID 0xbd1e @@ -74,6 +75,7 @@ /* PCIE Device IDs */ #define BRCM_PCIE_4350_DEVICE_ID 0x43a3 #define BRCM_PCIE_4354_DEVICE_ID 0x43df +#define BRCM_PCIE_4354_RAW_DEVICE_ID 0x4354 #define BRCM_PCIE_4356_DEVICE_ID 0x43ec #define BRCM_PCIE_43567_DEVICE_ID 0x43d3 #define BRCM_PCIE_43570_DEVICE_ID 0x43d9 diff --git a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h index e1fd499930a0..de8225e6248b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h +++ b/drivers/net/wireless/broadcom/brcm80211/include/chipcommon.h @@ -269,6 +269,25 @@ struct chipcregs { /* GSIO (spi/i2c) present, rev >= 37 */ #define CC_CAP2_GSIO 0x00000002 +/* sr_control0, rev >= 48 */ +#define CC_SR_CTL0_ENABLE_MASK BIT(0) +#define CC_SR_CTL0_ENABLE_SHIFT 0 +#define CC_SR_CTL0_EN_SR_ENG_CLK_SHIFT 1 /* sr_clk to sr_memory enable */ +#define CC_SR_CTL0_RSRC_TRIGGER_SHIFT 2 /* Rising edge resource trigger 0 to + * sr_engine + */ +#define CC_SR_CTL0_MIN_DIV_SHIFT 6 /* Min division value for fast clk + * in sr_engine + */ +#define CC_SR_CTL0_EN_SBC_STBY_SHIFT 16 +#define CC_SR_CTL0_EN_SR_ALP_CLK_MASK_SHIFT 18 +#define CC_SR_CTL0_EN_SR_HT_CLK_SHIFT 19 +#define CC_SR_CTL0_ALLOW_PIC_SHIFT 20 /* Allow pic to separate power + * domains + */ +#define CC_SR_CTL0_MAX_SR_LQ_CLK_CNT_SHIFT 25 +#define CC_SR_CTL0_EN_MEM_DISABLE_FOR_SLEEP 30 + /* pmucapabilities */ #define PCAP_REV_MASK 0x000000ff #define PCAP_RC_MASK 0x00001f00 diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 04dd7a936593..5512c7f73fce 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5462,7 +5462,7 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) { we have to add a spin lock... */ rc = readBSSListRid(ai, doLoseSync, &BSSList_rid); while(rc == 0 && BSSList_rid.index != cpu_to_le16(0xffff)) { - ptr += sprintf(ptr, "%pM %*s rssi = %d", + ptr += sprintf(ptr, "%pM %.*s rssi = %d", BSSList_rid.bssid, (int)BSSList_rid.ssidLen, BSSList_rid.ssid, diff --git a/drivers/net/wireless/intel/ipw2x00/Kconfig b/drivers/net/wireless/intel/ipw2x00/Kconfig index d6ec44d7a391..562395517e6c 100644 --- a/drivers/net/wireless/intel/ipw2x00/Kconfig +++ b/drivers/net/wireless/intel/ipw2x00/Kconfig @@ -15,9 +15,9 @@ config IPW2100 A driver for the Intel PRO/Wireless 2100 Network Connection 802.11b wireless network adapter. - See <file:Documentation/networking/README.ipw2100> for information on - the capabilities currently enabled in this driver and for tips - for debugging issues and problems. 
+ See <file:Documentation/networking/device_drivers/intel/ipw2100.txt> + for information on the capabilities currently enabled in this driver + and for tips for debugging issues and problems. In order to use this driver, you will need a firmware image for it. You can obtain the firmware from @@ -77,8 +77,8 @@ config IPW2200 A driver for the Intel PRO/Wireless 2200BG and 2915ABG Network Connection adapters. - See <file:Documentation/networking/README.ipw2200> for - information on the capabilities currently enabled in this + See <file:Documentation/networking/device_drivers/intel/ipw2200.txt> + for information on the capabilities currently enabled in this driver and for tips for debugging issues and problems. In order to use this driver, you will need a firmware image for it. diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index 910db46db6a1..52e5ed2d3bc2 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -5603,12 +5603,8 @@ static void shim__set_security(struct net_device *dev, if ((sec->flags & SEC_ACTIVE_KEY) && priv->ieee->sec.active_key != sec->active_key) { - if (sec->active_key <= 3) { - priv->ieee->sec.active_key = sec->active_key; - priv->ieee->sec.flags |= SEC_ACTIVE_KEY; - } else - priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; - + priv->ieee->sec.active_key = sec->active_key; + priv->ieee->sec.flags |= SEC_ACTIVE_KEY; priv->status |= STATUS_SECURITY_UPDATED; } @@ -8370,7 +8366,7 @@ static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) { printk(KERN_WARNING DRV_NAME ": Firmware image not compatible " "(detected version id of %u). " - "See Documentation/networking/README.ipw2100\n", + "See Documentation/networking/device_drivers/intel/ipw2100.txt\n", h->version); return 1; } diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index bbdca13c5a9f..fa400f92d7e2 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -10722,11 +10722,8 @@ static void shim__set_security(struct net_device *dev, } if (sec->flags & SEC_ACTIVE_KEY) { - if (sec->active_key <= 3) { - priv->ieee->sec.active_key = sec->active_key; - priv->ieee->sec.flags |= SEC_ACTIVE_KEY; - } else - priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; + priv->ieee->sec.active_key = sec->active_key; + priv->ieee->sec.flags |= SEC_ACTIVE_KEY; priv->status |= STATUS_SECURITY_UPDATED; } else priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; diff --git a/drivers/net/wireless/intel/iwlegacy/3945-rs.c b/drivers/net/wireless/intel/iwlegacy/3945-rs.c index e8983c6a2b7b..a697edd46e7f 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-rs.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-rs.c @@ -781,7 +781,7 @@ il3945_rs_get_rate(void *il_r, struct ieee80211_sta *sta, void *il_sta, switch (scale_action) { case -1: - /* Decrese rate */ + /* Decrease rate */ if (low != RATE_INVALID) idx = low; break; diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 280cd8ae1696..6b4488a178a7 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -559,7 +559,7 @@ il4965_translate_rx_status(struct il_priv *il, u32 decrypt_in) decrypt_out |= RX_RES_STATUS_BAD_KEY_TTAK; break; } - /* fall through if TTAK OK */ + /* fall through - if TTAK OK */ default: if 
(!(decrypt_in & RX_MPDU_RES_STATUS_ICV_OK)) decrypt_out |= RX_RES_STATUS_BAD_ICV_MIC; diff --git a/drivers/net/wireless/intel/iwlegacy/common.c b/drivers/net/wireless/intel/iwlegacy/common.c index 6514baf799fe..a2f86cbcc740 100644 --- a/drivers/net/wireless/intel/iwlegacy/common.c +++ b/drivers/net/wireless/intel/iwlegacy/common.c @@ -2695,6 +2695,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_BAD_KEY_TTAK) break; + /* fall through */ case RX_RES_STATUS_SEC_TYPE_WEP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == @@ -2704,6 +2705,7 @@ il_set_decrypted_flag(struct il_priv *il, struct ieee80211_hdr *hdr, D_RX("Packet destroyed\n"); return -1; } + /* fall through */ case RX_RES_STATUS_SEC_TYPE_CCMP: if ((decrypt_res & RX_RES_STATUS_DECRYPT_TYPE_MSK) == RX_RES_STATUS_DECRYPT_OK) { diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig index e5a2fc738ac3..491ca3c8b43c 100644 --- a/drivers/net/wireless/intel/iwlwifi/Kconfig +++ b/drivers/net/wireless/intel/iwlwifi/Kconfig @@ -1,6 +1,6 @@ config IWLWIFI tristate "Intel Wireless WiFi Next Gen AGN - Wireless-N/Advanced-N/Ultimate-N (iwlwifi) " - depends on PCI && MAC80211 && HAS_IOMEM + depends on PCI && HAS_IOMEM select FW_LOADER ---help--- Select to build the driver supporting the: @@ -53,6 +53,7 @@ config IWLWIFI_LEDS config IWLDVM tristate "Intel Wireless WiFi DVM Firmware support" + depends on MAC80211 help This is the driver that supports the DVM firmware. The list of the devices that use this firmware is available here: @@ -61,6 +62,7 @@ config IWLDVM config IWLMVM tristate "Intel Wireless WiFi MVM Firmware support" select WANT_DEV_COREDUMP + depends on MAC80211 help This is the driver that supports the MVM firmware. 
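The iwlegacy 4965-mac.c and common.c hunks above annotate intentional switch fall-through so GCC's -Wimplicit-fallthrough stays quiet; at its default level the warning accepts a "fall through" comment as an explicit opt-in. A compilable sketch with simplified, hypothetical flag values, modeled loosely on the il_set_decrypted_flag() cascade:

#include <stdio.h>

static const char *classify(unsigned int res)
{
	switch (res & 0x3) {
	case 1:
		if (res & 0x4)
			return "TKIP ok";
		/* fall through */
	case 2:
		if (res & 0x8)
			return "WEP/CCMP ok";
		return "bad decrypt";
	default:
		return "not encrypted";
	}
}

int main(void)
{
	/* case 1 without its "ok" bit falls into case 2's checks */
	printf("%s\n", classify(0x1));	/* bad decrypt */
	printf("%s\n", classify(0x5));	/* TKIP ok */
	return 0;
}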
The list of the devices that use this firmware is available here: diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 04e376cc898c..ff41987a7e35 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -11,6 +11,7 @@ iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o +iwlwifi-objs += iwl-dbg-tlv.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c index 76b5ddb20248..1ff388b593ad 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/1000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/1000.c @@ -48,7 +48,7 @@ static const struct iwl_base_params iwl1000_base_params = { .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, - .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .pll_cfg = true, .max_ll_items = OTP_MAX_LL_ITEMS_1000, .shadow_ram_support = false, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index e7e45846dd07..a6ec7ad39dcb 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -57,7 +57,7 @@ #define IWL135_MODULE_FIRMWARE(api) IWL135_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl2000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, @@ -71,7 +71,7 @@ static const struct iwl_base_params iwl2000_base_params = { static const struct iwl_base_params iwl2030_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index da5d5f9b2573..7e65073834b7 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -56,14 +56,13 @@ #include "iwl-config.h" /* Highest firmware API version supported */ -#define IWL_22000_UCODE_API_MAX 41 +#define IWL_22000_UCODE_API_MAX 43 /* Lowest firmware API version supported */ #define IWL_22000_UCODE_API_MIN 39 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d -#define IWL_22000_TX_POWER_VERSION 0xffff /* meaningless */ /* Memory offsets and lengths */ #define IWL_22000_DCCM_OFFSET 0x800000 /* LMAC1 */ @@ -106,10 +105,8 @@ #define IWL_QU_B_JF_B_MODULE_FIRMWARE(api) \ IWL_QU_B_JF_B_FW_PRE __stringify(api) ".ucode" -#define NVM_HW_SECTION_NUM_FAMILY_22000 10 - static const struct iwl_base_params iwl_22000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, .max_tfd_queue_size = 256, .shadow_ram_support = true, @@ -121,7 +118,7 @@ static const struct iwl_base_params iwl_22000_base_params = { }; static const struct iwl_base_params iwl_22560_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 512, 
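Several cfg hunks in this series replace OTP_LOW_IMAGE_SIZE and its _FAMILY_* variants with names that carry the byte size (2K/16K/32K), so the constant no longer pretends to be family-specific. The definitions below follow the usual iwlwifi pattern of counting 512-entry banks of 16-bit words, but they are reconstructed for illustration rather than quoted from iwl-eeprom-read.h:

#include <stdio.h>

#define OTP_LOW_IMAGE_SIZE_2K	(2 * 512 * sizeof(unsigned short))	/* 2 KB */
#define OTP_LOW_IMAGE_SIZE_16K	(16 * 512 * sizeof(unsigned short))	/* 16 KB */
#define OTP_LOW_IMAGE_SIZE_32K	(32 * 512 * sizeof(unsigned short))	/* 32 KB */

int main(void)
{
	printf("2K=%zu 16K=%zu 32K=%zu\n", OTP_LOW_IMAGE_SIZE_2K,
	       OTP_LOW_IMAGE_SIZE_16K, OTP_LOW_IMAGE_SIZE_32K);
	return 0;
}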
.max_tfd_queue_size = 65536, .shadow_ram_support = true, @@ -142,7 +139,7 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ + .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL_22000_DCCM_OFFSET, \ .dccm_len = IWL_22000_DCCM_LEN, \ @@ -157,7 +154,6 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .mac_addr_from_csr = true, \ .ht_params = &iwl_22000_ht_params, \ .nvm_ver = IWL_22000_NVM_VERSION, \ - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .use_tfh = true, \ .rf_id = true, \ @@ -323,7 +319,6 @@ MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_QU_B_HR_B_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index 30e62a7c9d52..fbb18d066cd0 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -66,7 +66,7 @@ #define IWL6030_MODULE_FIRMWARE(api) IWL6030_FW_PRE __stringify(api) ".ucode" static const struct iwl_base_params iwl6000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, @@ -79,7 +79,7 @@ static const struct iwl_base_params iwl6000_base_params = { }; static const struct iwl_base_params iwl6050_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x50, @@ -92,7 +92,7 @@ static const struct iwl_base_params iwl6050_base_params = { }; static const struct iwl_base_params iwl6000_g2_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE, + .eeprom_size = OTP_LOW_IMAGE_SIZE_2K, .num_of_queues = IWLAGN_NUM_QUEUES, .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index c973bfaa3414..289e3c398a12 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -80,17 +80,11 @@ /* NVM versions */ #define IWL7260_NVM_VERSION 0x0a1d -#define IWL7260_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3160_NVM_VERSION 0x709 -#define IWL3160_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3165_NVM_VERSION 0x709 -#define IWL3165_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL3168_NVM_VERSION 0xd01 -#define IWL3168_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265_NVM_VERSION 0x0a1d -#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ #define IWL7265D_NVM_VERSION 0x0c11 -#define IWL7265_TX_POWER_VERSION 0xffff /* meaningless */ /* DCCM offsets and lengths */ #define IWL7000_DCCM_OFFSET 
0x800000 @@ -113,10 +107,8 @@ #define IWL7265D_FW_PRE "iwlwifi-7265D-" #define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" -#define NVM_HW_SECTION_NUM_FAMILY_7000 0 - static const struct iwl_base_params iwl7000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, + .eeprom_size = OTP_LOW_IMAGE_SIZE_16K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, @@ -159,7 +151,7 @@ static const struct iwl_ht_params iwl7000_ht_params = { .device_family = IWL_DEVICE_FAMILY_7000, \ .base_params = &iwl7000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_7000, \ + .nvm_hw_section_num = 0, \ .non_shared_ant = ANT_A, \ .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .dccm_offset = IWL7000_DCCM_OFFSET, \ @@ -191,7 +183,6 @@ const struct iwl_cfg iwl7260_2ac_cfg = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, @@ -203,7 +194,6 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, .high_temp = true, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, @@ -217,7 +207,6 @@ const struct iwl_cfg iwl7260_2n_cfg = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, @@ -229,7 +218,6 @@ const struct iwl_cfg iwl7260_n_cfg = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL7260_NVM_VERSION, - .nvm_calib_ver = IWL7260_TX_POWER_VERSION, .host_interrupt_operation_mode = true, .lp_xtal_workaround = true, .dccm_len = IWL7260_DCCM_LEN, @@ -241,7 +229,6 @@ const struct iwl_cfg iwl3160_2ac_cfg = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, - .nvm_calib_ver = IWL3160_TX_POWER_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; @@ -252,7 +239,6 @@ const struct iwl_cfg iwl3160_2n_cfg = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, - .nvm_calib_ver = IWL3160_TX_POWER_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; @@ -263,7 +249,6 @@ const struct iwl_cfg iwl3160_n_cfg = { IWL_DEVICE_7000, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3160_NVM_VERSION, - .nvm_calib_ver = IWL3160_TX_POWER_VERSION, .host_interrupt_operation_mode = true, .dccm_len = IWL3160_DCCM_LEN, }; @@ -291,7 +276,6 @@ const struct iwl_cfg iwl3165_2ac_cfg = { IWL_DEVICE_7005D, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3165_NVM_VERSION, - .nvm_calib_ver = IWL3165_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -302,7 +286,6 @@ const struct iwl_cfg iwl3168_2ac_cfg = { IWL_DEVICE_3008, .ht_params = &iwl7000_ht_params, .nvm_ver = IWL3168_NVM_VERSION, - .nvm_calib_ver = IWL3168_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, .nvm_type = IWL_NVM_SDP, @@ -314,7 +297,6 @@ const struct iwl_cfg iwl7265_2ac_cfg = { IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ 
-325,7 +307,6 @@ const struct iwl_cfg iwl7265_2n_cfg = { IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -336,7 +317,6 @@ const struct iwl_cfg iwl7265_n_cfg = { IWL_DEVICE_7005, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -347,7 +327,6 @@ const struct iwl_cfg iwl7265d_2ac_cfg = { IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -358,7 +337,6 @@ const struct iwl_cfg iwl7265d_2n_cfg = { IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; @@ -369,7 +347,6 @@ const struct iwl_cfg iwl7265d_n_cfg = { IWL_DEVICE_7005D, .ht_params = &iwl7265_ht_params, .nvm_ver = IWL7265D_NVM_VERSION, - .nvm_calib_ver = IWL7265_TX_POWER_VERSION, .pwr_tx_backoffs = iwl7265_pwr_tx_backoffs, .dccm_len = IWL7265_DCCM_LEN, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 348c40fcddcb..d7d17c1cceea 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -75,7 +75,6 @@ /* NVM versions */ #define IWL8000_NVM_VERSION 0x0a1d -#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ /* Memory offsets and lengths */ #define IWL8260_DCCM_OFFSET 0x800000 @@ -93,11 +92,10 @@ #define IWL8265_MODULE_FIRMWARE(api) \ IWL8265_FW_PRE __stringify(api) ".ucode" -#define NVM_HW_SECTION_NUM_FAMILY_8000 10 #define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C" static const struct iwl_base_params iwl8000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, @@ -139,7 +137,7 @@ static const struct iwl_tt_params iwl8000_tt_params = { .device_family = IWL_DEVICE_FAMILY_8000, \ .base_params = &iwl8000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_8000, \ + .nvm_hw_section_num = 10, \ .features = NETIF_F_RXCSUM, \ .non_shared_ant = ANT_A, \ .dccm_offset = IWL8260_DCCM_OFFSET, \ @@ -177,7 +175,6 @@ const struct iwl_cfg iwl8260_2n_cfg = { IWL_DEVICE_8260, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, }; const struct iwl_cfg iwl8260_2ac_cfg = { @@ -186,7 +183,6 @@ const struct iwl_cfg iwl8260_2ac_cfg = { IWL_DEVICE_8260, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; @@ -196,7 +192,6 @@ const struct iwl_cfg iwl8265_2ac_cfg = { IWL_DEVICE_8265, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .vht_mu_mimo_supported = true, }; @@ -207,7 +202,6 @@ const struct iwl_cfg iwl8275_2ac_cfg = { IWL_DEVICE_8265, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_ht_ampdu_exponent = 
IEEE80211_HT_MAX_AMPDU_64K, .vht_mu_mimo_supported = true, }; @@ -218,7 +212,6 @@ const struct iwl_cfg iwl4165_2ac_cfg = { IWL_DEVICE_8000, .ht_params = &iwl8000_ht_params, .nvm_ver = IWL8000_NVM_VERSION, - .nvm_calib_ver = IWL8000_TX_POWER_VERSION, .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index d55fd23cafe6..f2114137c13f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -57,14 +57,13 @@ #include "fw/file.h" /* Highest firmware API version supported */ -#define IWL9000_UCODE_API_MAX 41 +#define IWL9000_UCODE_API_MAX 43 /* Lowest firmware API version supported */ #define IWL9000_UCODE_API_MIN 30 /* NVM versions */ #define IWL9000_NVM_VERSION 0x0a1d -#define IWL9000_TX_POWER_VERSION 0xffff /* meaningless */ /* Memory offsets and lengths */ #define IWL9000_DCCM_OFFSET 0x800000 @@ -90,10 +89,8 @@ #define IWL9260B_MODULE_FIRMWARE(api) \ IWL9260B_FW_PRE __stringify(api) ".ucode" -#define NVM_HW_SECTION_NUM_FAMILY_9000 10 - static const struct iwl_base_params iwl9000_base_params = { - .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000, + .eeprom_size = OTP_LOW_IMAGE_SIZE_32K, .num_of_queues = 31, .max_tfd_queue_size = 256, .shadow_ram_support = true, @@ -137,7 +134,7 @@ static const struct iwl_tt_params iwl9000_tt_params = { .device_family = IWL_DEVICE_FAMILY_9000, \ .base_params = &iwl9000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ - .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_9000, \ + .nvm_hw_section_num = 10, \ .non_shared_ant = ANT_B, \ .dccm_offset = IWL9000_DCCM_OFFSET, \ .dccm_len = IWL9000_DCCM_LEN, \ @@ -157,17 +154,17 @@ static const struct iwl_tt_params iwl9000_tt_params = { .min_umac_error_event_table = 0x800000, \ .csr = &iwl_csr_v1, \ .d3_debug_data_base_addr = 0x401000, \ - .d3_debug_data_length = 92 * 1024 + .d3_debug_data_length = 92 * 1024, \ + .ht_params = &iwl9000_ht_params, \ + .nvm_ver = IWL9000_NVM_VERSION, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K + const struct iwl_cfg iwl9160_2ac_cfg = { .name = "Intel(R) Dual Band Wireless AC 9160", .fw_name_pre = IWL9260A_FW_PRE, .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl9260_2ac_cfg = { @@ -175,10 +172,6 @@ const struct iwl_cfg iwl9260_2ac_cfg = { .fw_name_pre = IWL9260A_FW_PRE, .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl9260_killer_2ac_cfg = { @@ -186,10 +179,6 @@ const struct iwl_cfg iwl9260_killer_2ac_cfg = { .fw_name_pre = IWL9260A_FW_PRE, .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl9270_2ac_cfg = { @@ -197,10 +186,6 @@ const struct iwl_cfg iwl9270_2ac_cfg = { .fw_name_pre = IWL9260A_FW_PRE, .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = 
IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl9460_2ac_cfg = { @@ -208,10 +193,6 @@ const struct iwl_cfg iwl9460_2ac_cfg = { .fw_name_pre = IWL9260A_FW_PRE, .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl9460_2ac_cfg_soc = { @@ -220,10 +201,6 @@ const struct iwl_cfg iwl9460_2ac_cfg_soc = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, }; @@ -234,10 +211,6 @@ const struct iwl_cfg iwl9461_2ac_cfg_soc = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, }; @@ -248,10 +221,6 @@ const struct iwl_cfg iwl9462_2ac_cfg_soc = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, }; @@ -261,10 +230,6 @@ const struct iwl_cfg iwl9560_2ac_cfg = { .fw_name_pre = IWL9260A_FW_PRE, .fw_name_pre_b_or_c_step = IWL9260B_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, }; const struct iwl_cfg iwl9560_2ac_cfg_soc = { @@ -273,10 +238,6 @@ const struct iwl_cfg iwl9560_2ac_cfg_soc = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, }; @@ -287,10 +248,6 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_soc = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, }; @@ -301,10 +258,6 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_soc = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, }; @@ -315,10 +268,6 @@ const struct iwl_cfg iwl9460_2ac_cfg_shared_clk = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, 
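The cfg/9000.c rework running through these hunks is pure deduplication: fields every 9000-series entry repeated (ht_params, nvm_ver, max_ht_ampdu_exponent) move into the IWL_DEVICE_9000 macro, so each config struct only lists what actually differs. A self-contained sketch of the pattern with stand-in types and values:

#include <stdio.h>

struct cfg {
	const char *name;
	unsigned int nvm_ver;
	unsigned int max_ampdu_exp;
	int integrated;
};

/* everything every entry used to repeat, gathered in one macro */
#define DEVICE_9000_COMMON \
	.nvm_ver = 0x0a1d, \
	.max_ampdu_exp = 3

static const struct cfg iwl9260 = {
	.name = "iwl9260",
	DEVICE_9000_COMMON,
};

static const struct cfg iwl9560_soc = {
	.name = "iwl9560 soc",
	DEVICE_9000_COMMON,
	.integrated = 1,	/* only per-device differences remain */
};

int main(void)
{
	printf("%s nvm=0x%x\n", iwl9260.name, iwl9260.nvm_ver);
	printf("%s integrated=%d\n", iwl9560_soc.name, iwl9560_soc.integrated);
	return 0;
}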
.extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK @@ -330,10 +279,6 @@ const struct iwl_cfg iwl9461_2ac_cfg_shared_clk = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK @@ -345,10 +290,6 @@ const struct iwl_cfg iwl9462_2ac_cfg_shared_clk = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK @@ -360,10 +301,6 @@ const struct iwl_cfg iwl9560_2ac_cfg_shared_clk = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK @@ -375,10 +312,6 @@ const struct iwl_cfg iwl9560_killer_2ac_cfg_shared_clk = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK @@ -390,10 +323,6 @@ const struct iwl_cfg iwl9560_killer_s_2ac_cfg_shared_clk = { .fw_name_pre_b_or_c_step = IWL9000B_FW_PRE, .fw_name_pre_rf_next_step = IWL9000RFB_FW_PRE, IWL_DEVICE_9000, - .ht_params = &iwl9000_ht_params, - .nvm_ver = IWL9000_NVM_VERSION, - .nvm_calib_ver = IWL9000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, .integrated = true, .soc_latency = 5000, .extra_phy_cfg_flags = FW_PHY_CFG_SHARED_CLK diff --git a/drivers/net/wireless/intel/iwlwifi/dvm/main.c b/drivers/net/wireless/intel/iwlwifi/dvm/main.c index 1088ff036e13..c219bca5cff4 100644 --- a/drivers/net/wireless/intel/iwlwifi/dvm/main.c +++ b/drivers/net/wireless/intel/iwlwifi/dvm/main.c @@ -1224,6 +1224,23 @@ static int iwl_eeprom_init_hw_params(struct iwl_priv *priv) return 0; } +static int iwl_nvm_check_version(struct iwl_nvm_data *data, + struct iwl_trans *trans) +{ + if (data->nvm_version >= trans->cfg->nvm_ver || + data->calib_version >= trans->cfg->nvm_calib_ver) { + IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", + data->nvm_version, data->calib_version); + return 0; + } + + IWL_ERR(trans, + "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", + data->nvm_version, trans->cfg->nvm_ver, + data->calib_version, trans->cfg->nvm_calib_ver); + return -EINVAL; +} + static struct iwl_op_mode *iwl_op_mode_dvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, const struct iwl_fw *fw, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h index 7f645b62804e..5e88fa2e6fb7 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/config.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/config.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 
Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -127,17 +129,6 @@ struct iwl_phy_cfg_cmd { struct iwl_calib_ctrl calib_control; } __packed; -#define PHY_CFG_RADIO_TYPE (BIT(0) | BIT(1)) -#define PHY_CFG_RADIO_STEP (BIT(2) | BIT(3)) -#define PHY_CFG_RADIO_DASH (BIT(4) | BIT(5)) -#define PHY_CFG_PRODUCT_NUMBER (BIT(6) | BIT(7)) -#define PHY_CFG_TX_CHAIN_A BIT(8) -#define PHY_CFG_TX_CHAIN_B BIT(9) -#define PHY_CFG_TX_CHAIN_C BIT(10) -#define PHY_CFG_RX_CHAIN_A BIT(12) -#define PHY_CFG_RX_CHAIN_B BIT(13) -#define PHY_CFG_RX_CHAIN_C BIT(14) - /* * enum iwl_dc2dc_config_id - flag ids * diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index eff3249af48a..fdc54a5dc9de 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -105,6 +105,11 @@ enum iwl_data_path_subcmd_ids { HE_AIR_SNIFFER_CONFIG_CMD = 0x13, /** + * @RX_NO_DATA_NOTIF: &struct iwl_rx_no_data + */ + RX_NO_DATA_NOTIF = 0xF5, + + /** * @TLC_MNG_UPDATE_NOTIF: &struct iwl_tlc_update_notif */ TLC_MNG_UPDATE_NOTIF = 0xF7, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h new file mode 100644 index 000000000000..ab82b7a67967 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/dbg-tlv.h @@ -0,0 +1,401 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
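The iwl_nvm_check_version() helper added to dvm/main.c above accepts the EEPROM when either the NVM version or the calibration version meets the minimum from the config - OR semantics, not AND. Moving the check (and the nvm_calib_ver field it reads) into the DVM op-mode is also why the MVM-era configs earlier in this series can delete their meaningless IWL*_TX_POWER_VERSION values. A userspace model of the predicate:

#include <stdio.h>

static int nvm_version_ok(unsigned int nvm, unsigned int calib,
			  unsigned int min_nvm, unsigned int min_calib)
{
	/* pass when either version meets its minimum, as in the OR above */
	return nvm >= min_nvm || calib >= min_calib;
}

int main(void)
{
	printf("%d\n", nvm_version_ok(0x0a1d, 0x5, 0x0a1d, 0x6));	/* 1 */
	printf("%d\n", nvm_version_ok(0x0a10, 0x5, 0x0a1d, 0x6));	/* 0 */
	return 0;
}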
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_fw_dbg_tlv_h__ +#define __iwl_fw_dbg_tlv_h__ + +#include <linux/bitops.h> + +/* + * struct iwl_fw_ini_header: Common Header for all debug group TLV's structures + * @tlv_version: version info + * @apply_point: &enum iwl_fw_ini_apply_point + * @data: TLV data followed + **/ +struct iwl_fw_ini_header { + __le32 tlv_version; + __le32 apply_point; + u8 data[]; +} __packed; /* FW_INI_HEADER_TLV_S */ + +/** + * struct iwl_fw_ini_allocation_tlv - (IWL_FW_INI_TLV_TYPE_BUFFER_ALLOCATION) + * buffer allocation TLV - for debug + * + * @iwl_fw_ini_header: header + * @allocation_id: &enum iwl_fw_ini_allocation_id - to bind allocation and hcmd + * if needed (DBGC1/DBGC2/SDFX/...) + * @buffer_location: type of iwl_fw_ini_buffer_location + * @size: size in bytes + * @max_fragments: the maximum allowed fragmentation in the desired memory + * allocation above + * @min_frag_size: the minimum allowed fragmentation size in bytes +*/ +struct iwl_fw_ini_allocation_tlv { + struct iwl_fw_ini_header header; + __le32 allocation_id; + __le32 buffer_location; + __le32 size; + __le32 max_fragments; + __le32 min_frag_size; +} __packed; /* FW_INI_BUFFER_ALLOCATION_TLV_S_VER_1 */ + +/** + * struct iwl_fw_ini_hcmd (IWL_FW_INI_TLV_TYPE_HCMD) + * Generic Host command pass through TLV + * + * @id: the debug configuration command type for instance: 0xf6 / 0xf5 / DHC + * @group: the desired cmd group + * @padding: all zeros for dword alignment + * @data: all of the relevant command (0xf6/0xf5) to be sent +*/ +struct iwl_fw_ini_hcmd { + u8 id; + u8 group; + __le16 padding; + u8 data[0]; +} __packed; /* FW_INI_HCMD_S */ + +/** + * struct iwl_fw_ini_hcmd_tlv + * @header: header + * @hcmd: a variable length host-command to be sent to apply the configuration. 
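The new iwl_fw_ini_header above ends in a flexible array member, the usual C idiom for a fixed header followed by a variable payload. The sketch below walks a buffer of such records; the explicit length word is invented for the demo (the real INI TLVs are framed by the enclosing ucode TLV stream), so treat the framing as hypothetical:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ini_header {		/* host-endian stand-in for the __le32 header */
	uint32_t tlv_version;
	uint32_t apply_point;
	uint8_t data[];		/* variable payload follows the fixed part */
};

static void walk(const uint8_t *buf, size_t len)
{
	size_t off = 0;

	/* demo framing: header, then a 32-bit payload length, then payload */
	while (off + sizeof(struct ini_header) + 4 <= len) {
		struct ini_header hdr;
		uint32_t plen;

		memcpy(&hdr, buf + off, sizeof(hdr));
		memcpy(&plen, buf + off + sizeof(hdr), sizeof(plen));
		printf("ver=%u apply_point=%u payload=%u bytes\n",
		       hdr.tlv_version, hdr.apply_point, plen);
		off += sizeof(hdr) + sizeof(plen) + plen;
	}
}

int main(void)
{
	/* one fabricated record: version 1, apply point 2, 4-byte payload */
	const uint8_t buf[] = { 1, 0, 0, 0, 2, 0, 0, 0, 4, 0, 0, 0,
				0xde, 0xad, 0xbe, 0xef };

	walk(buf, sizeof(buf));
	return 0;
}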
+ */ +struct iwl_fw_ini_hcmd_tlv { + struct iwl_fw_ini_header header; + struct iwl_fw_ini_hcmd hcmd; +} __packed; /* FW_INI_HCMD_TLV_S_VER_1 */ + +/* + * struct iwl_fw_ini_debug_flow_tlv (IWL_FW_INI_TLV_TYPE_DEBUG_FLOW) + * + * @header: header + * @debug_flow_cfg: &enum iwl_fw_ini_debug_flow + */ +struct iwl_fw_ini_debug_flow_tlv { + struct iwl_fw_ini_header header; + __le32 debug_flow_cfg; +} __packed; /* FW_INI_DEBUG_FLOW_TLV_S_VER_1 */ + +#define IWL_FW_INI_MAX_REGION_ID 20 +#define IWL_FW_INI_MAX_NAME 32 +/** + * struct iwl_fw_ini_region_cfg + * @region_id: ID of this dump configuration + * @region_type: &enum iwl_fw_ini_region_type + * @num_regions: amount of regions in the address array. + * @allocation_id: For DRAM type field substitutes for allocation_id. + * @name_len: name length + * @name: file name to use for this region + * @size: size of the data, in bytes.(unused for IWL_FW_INI_REGION_DRAM_BUFFER) + * @start_addr: array of addresses. (unused for IWL_FW_INI_REGION_DRAM_BUFFER) + */ +struct iwl_fw_ini_region_cfg { + __le32 region_id; + __le32 region_type; + __le32 name_len; + u8 name[IWL_FW_INI_MAX_NAME]; + union { + __le32 num_regions; + __le32 allocation_id; + }; + __le32 size; + __le32 start_addr[]; +} __packed; /* FW_INI_REGION_CONFIG_S */ + +/** + * struct iwl_fw_ini_region_tlv - (IWL_FW_INI_TLV_TYPE_REGION_CFG) + * DUMP sections define IDs and triggers that use those IDs TLV + * @header: header + * @num_regions: how many different region section and IDs are coming next + * @region_config: list of region configurations + */ +struct iwl_fw_ini_region_tlv { + struct iwl_fw_ini_header header; + __le32 num_regions; + struct iwl_fw_ini_region_cfg region_config[]; +} __packed; /* FW_INI_REGION_CFG_S */ + +/** + * struct iwl_fw_ini_trigger - (IWL_FW_INI_TLV_TYPE_DUMP_CFG) + * Region sections define IDs and triggers that use those IDs TLV + * + * @trigger_id: enum &iwl_fw_ini_trigger_id + * @ignore_default: override FW TLV with binary TLV + * @dump_delay: delay from trigger fire to dump, in usec + * @occurrences: max amount of times to be fired + * @ignore_consec: ignore consecutive triggers, in usec + * @force_restart: force FW restart + * @multi_dut: initiate debug dump data on several DUTs + * @trigger_data: generic data to be utilized per trigger + * @num_regions: number of dump regions defined for this trigger + * @data: region IDs + */ +struct iwl_fw_ini_trigger { + __le32 trigger_id; + __le32 ignore_default; + __le32 dump_delay; + __le32 occurrences; + __le32 ignore_consec; + __le32 force_restart; + __le32 multi_dut; + __le32 trigger_data; + __le32 num_regions; + __le32 data[]; +} __packed; /* FW_INI_TRIGGER_CONFIG_S */ + +/** + * struct iwl_fw_ini_trigger_tlv - (IWL_FW_INI_TLV_TYPE_TRIGGERS_CFG) + * DUMP sections define IDs and triggers that use those IDs TLV + * + * @header: header + * @num_triggers: how many different triggers section and IDs are coming next + * @trigger_config: list of trigger configurations + */ +struct iwl_fw_ini_trigger_tlv { + struct iwl_fw_ini_header header; + __le32 num_triggers; + struct iwl_fw_ini_trigger trigger_config[]; +} __packed; /* FW_INI_TRIGGER_CFG_S */ + +/** + * enum iwl_fw_ini_trigger_id + * @IWL_FW_TRIGGER_ID_FW_ASSERT: FW assert + * @IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG: TFD queue hang + * @IWL_FW_TRIGGER_ID_FW_HW_ERROR: HW assert + * @IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR: FW error notification + * @IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING: FW warning notification + * @IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO: FW info notification + *
@IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG: FW debug notification + * @IWL_FW_TRIGGER_ID_USER_TRIGGER: User trigger + * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY: peer inactivity + * @IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT: undefined + * @IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED: TX latency + * threshold was crossed + * @IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED: TX failed + * @IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER: Deauth initiated by host + * @IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST: stop GO request + * @IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST: start GO request + * @IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST: join P2P group request + * @IWL_FW_TRIGGER_ID_HOST_SCAN_START: scan started event + * @IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED: undefined + * @IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS: undefined + * @IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG: undefined + * @IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED: BAR frame was received + * @IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED: agg TX failed + * @IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED: EAPOL TX failed + * @IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED: suspicious TX response + * @IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT: received suspicious auth + * @IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE: roaming was completed + * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED: fast assoc failed + * @IWL_FW_TRIGGER_ID_HOST_D3_START: D3 start + * @IWL_FW_TRIGGER_ID_HOST_D3_END: D3 end + * @IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS: missed beacon events + * @IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS: P2P missed beacon events + * @IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES: undefined + * @IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED: undefined + * @IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED: authentication / association + * failed + * @IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE: scan complete event + * @IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT: scan abort complete + * @IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE: nic alive message was received + * @IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE: CSA was completed + * @IWL_FW_TRIGGER_ID_NUM: number of trigger IDs + */ +enum iwl_fw_ini_trigger_id { + /* Errors triggers */ + IWL_FW_TRIGGER_ID_FW_ASSERT = 1, + IWL_FW_TRIGGER_ID_FW_TFD_Q_HANG = 2, + IWL_FW_TRIGGER_ID_FW_HW_ERROR = 3, + /* Generic triggers */ + IWL_FW_TRIGGER_ID_FW_TRIGGER_ERROR = 4, + IWL_FW_TRIGGER_ID_FW_TRIGGER_WARNING = 5, + IWL_FW_TRIGGER_ID_FW_TRIGGER_INFO = 6, + IWL_FW_TRIGGER_ID_FW_TRIGGER_DEBUG = 7, + /* User Trigger */ + IWL_FW_TRIGGER_ID_USER_TRIGGER = 8, + /* Host triggers */ + IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_INACTIVITY = 9, + IWL_FW_TRIGGER_ID_HOST_DID_INITIATED_EVENT = 10, + IWL_FW_TRIGGER_ID_HOST_TX_LATENCY_THRESHOLD_CROSSED = 11, + IWL_FW_TRIGGER_ID_HOST_TX_RESPONSE_STATUS_FAILED = 12, + IWL_FW_TRIGGER_ID_HOST_OS_REQ_DEAUTH_PEER = 13, + IWL_FW_TRIGGER_ID_HOST_STOP_GO_REQUEST = 14, + IWL_FW_TRIGGER_ID_HOST_START_GO_REQUEST = 15, + IWL_FW_TRIGGER_ID_HOST_JOIN_GROUP_REQUEST = 16, + IWL_FW_TRIGGER_ID_HOST_SCAN_START = 17, + IWL_FW_TRIGGER_ID_HOST_SCAN_SUBMITTED = 18, + IWL_FW_TRIGGER_ID_HOST_SCAN_PARAMS = 19, + IWL_FW_TRIGGER_ID_HOST_CHECK_FOR_HANG = 20, + IWL_FW_TRIGGER_ID_HOST_BAR_RECEIVED = 21, + IWL_FW_TRIGGER_ID_HOST_AGG_TX_RESPONSE_STATUS_FAILED = 22, + IWL_FW_TRIGGER_ID_HOST_EAPOL_TX_RESPONSE_FAILED = 23, + IWL_FW_TRIGGER_ID_HOST_FAKE_TX_RESPONSE_SUSPECTED = 24, + IWL_FW_TRIGGER_ID_HOST_AUTH_REQ_FROM_ASSOC_CLIENT = 25, + IWL_FW_TRIGGER_ID_HOST_ROAM_COMPLETE = 26, +
IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAST_FAILED = 27, + IWL_FW_TRIGGER_ID_HOST_D3_START = 28, + IWL_FW_TRIGGER_ID_HOST_D3_END = 29, + IWL_FW_TRIGGER_ID_HOST_BSS_MISSED_BEACONS = 30, + IWL_FW_TRIGGER_ID_HOST_P2P_CLIENT_MISSED_BEACONS = 31, + IWL_FW_TRIGGER_ID_HOST_PEER_CLIENT_TX_FAILURES = 32, + IWL_FW_TRIGGER_ID_HOST_TX_WFD_ACTION_FRAME_FAILED = 33, + IWL_FW_TRIGGER_ID_HOST_AUTH_ASSOC_FAILED = 34, + IWL_FW_TRIGGER_ID_HOST_SCAN_COMPLETE = 35, + IWL_FW_TRIGGER_ID_HOST_SCAN_ABORT = 36, + IWL_FW_TRIGGER_ID_HOST_NIC_ALIVE = 37, + IWL_FW_TRIGGER_ID_HOST_CHANNEL_SWITCH_COMPLETE = 38, + IWL_FW_TRIGGER_ID_NUM, +}; /* FW_INI_TRIGGER_ID_E_VER_1 */ + +/** + * enum iwl_fw_ini_apply_point + * @IWL_FW_INI_APPLY_INVALID: invalid + * @IWL_FW_INI_APPLY_EARLY: pre loading FW + * @IWL_FW_INI_APPLY_AFTER_ALIVE: first cmd from host after alive + * @IWL_FW_INI_APPLY_POST_INIT: last cmd in initialization sequence + * @IWL_FW_INI_APPLY_MISSED_BEACONS: missed beacons notification + * @IWL_FW_INI_APPLY_SCAN_COMPLETE: scan completed + * @IWL_FW_INI_APPLY_NUM: number of apply points +*/ +enum iwl_fw_ini_apply_point { + IWL_FW_INI_APPLY_INVALID, + IWL_FW_INI_APPLY_EARLY, + IWL_FW_INI_APPLY_AFTER_ALIVE, + IWL_FW_INI_APPLY_POST_INIT, + IWL_FW_INI_APPLY_MISSED_BEACONS, + IWL_FW_INI_APPLY_SCAN_COMPLETE, + IWL_FW_INI_APPLY_NUM, +}; /* FW_INI_APPLY_POINT_E_VER_1 */ + +/** + * enum iwl_fw_ini_allocation_id + * @IWL_FW_INI_ALLOCATION_INVALID: invalid + * @IWL_FW_INI_ALLOCATION_ID_DBGC1: allocation meant for DBGC1 configuration + * @IWL_FW_INI_ALLOCATION_ID_DBGC2: allocation meant for DBGC2 configuration + * @IWL_FW_INI_ALLOCATION_ID_DBGC3: allocation meant for DBGC3 configuration + * @IWL_FW_INI_ALLOCATION_ID_SDFX: for SDFX module + * @IWL_FW_INI_ALLOCATION_ID_FW_DUMP: used for crash and runtime dumps + * @IWL_FW_INI_ALLOCATION_ID_USER_DEFINED: for future user scenarios +*/ +enum iwl_fw_ini_allocation_id { + IWL_FW_INI_ALLOCATION_INVALID, + IWL_FW_INI_ALLOCATION_ID_DBGC1, + IWL_FW_INI_ALLOCATION_ID_DBGC2, + IWL_FW_INI_ALLOCATION_ID_DBGC3, + IWL_FW_INI_ALLOCATION_ID_SDFX, + IWL_FW_INI_ALLOCATION_ID_FW_DUMP, + IWL_FW_INI_ALLOCATION_ID_USER_DEFINED, +}; /* FW_INI_ALLOCATION_ID_E_VER_1 */ + +/** + * enum iwl_fw_ini_buffer_location + * @IWL_FW_INI_LOCATION_INVALID: invalid + * @IWL_FW_INI_LOCATION_SRAM_PATH: SRAM location + * @IWL_FW_INI_LOCATION_DRAM_PATH: DRAM location + */ +enum iwl_fw_ini_buffer_location { + IWL_FW_INI_LOCATION_INVALID, + IWL_FW_INI_LOCATION_SRAM_PATH, + IWL_FW_INI_LOCATION_DRAM_PATH, +}; /* FW_INI_BUFFER_LOCATION_E_VER_1 */ + +/** + * enum iwl_fw_ini_debug_flow + * @IWL_FW_INI_DEBUG_INVALID: invalid + * @IWL_FW_INI_DEBUG_DBTR_FLOW: undefined + * @IWL_FW_INI_DEBUG_TB2DTF_FLOW: undefined + */ +enum iwl_fw_ini_debug_flow { + IWL_FW_INI_DEBUG_INVALID, + IWL_FW_INI_DEBUG_DBTR_FLOW, + IWL_FW_INI_DEBUG_TB2DTF_FLOW, +}; /* FW_INI_DEBUG_FLOW_E_VER_1 */ + +/** + * enum iwl_fw_ini_region_type + * @IWL_FW_INI_REGION_INVALID: invalid + * @IWL_FW_INI_REGION_DEVICE_MEMORY: device internal memory + * @IWL_FW_INI_REGION_PERIPHERY_MAC: periphery registers of MAC + * @IWL_FW_INI_REGION_PERIPHERY_PHY: periphery registers of PHY + * @IWL_FW_INI_REGION_PERIPHERY_AUX: periphery registers of AUX + * @IWL_FW_INI_REGION_DRAM_BUFFER: DRAM buffer + * @IWL_FW_INI_REGION_DRAM_IMR: IMR memory + * @IWL_FW_INI_REGION_INTERNAL_BUFFER: undefined + * @IWL_FW_INI_REGION_TXF: TX fifos + * @IWL_FW_INI_REGION_RXF: RX fifo + * @IWL_FW_INI_REGION_PAGING: paging memory + * @IWL_FW_INI_REGION_CSR: CSR registers + * @IWL_FW_INI_REGION_NUM: number of
region types + */ +enum iwl_fw_ini_region_type { + IWL_FW_INI_REGION_INVALID, + IWL_FW_INI_REGION_DEVICE_MEMORY, + IWL_FW_INI_REGION_PERIPHERY_MAC, + IWL_FW_INI_REGION_PERIPHERY_PHY, + IWL_FW_INI_REGION_PERIPHERY_AUX, + IWL_FW_INI_REGION_DRAM_BUFFER, + IWL_FW_INI_REGION_DRAM_IMR, + IWL_FW_INI_REGION_INTERNAL_BUFFER, + IWL_FW_INI_REGION_TXF, + IWL_FW_INI_REGION_RXF, + IWL_FW_INI_REGION_PAGING, + IWL_FW_INI_REGION_CSR, + IWL_FW_INI_REGION_NUM +}; /* FW_INI_REGION_TYPE_E_VER_1*/ + +#endif diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index 1dd23f846fb9..7a3f7b7e6358 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -151,9 +151,9 @@ enum iwl_tsf_id { * @beacon_time: beacon transmit time in system time * @beacon_tsf: beacon transmit time in TSF * @bi: beacon interval in TU - * @bi_reciprocal: 2^32 / bi + * @reserved1: reserved * @dtim_interval: dtim transmit time in TU - * @dtim_reciprocal: 2^32 / dtim_interval + * @reserved2: reserved * @mcast_qid: queue ID for multicast traffic. * NOTE: obsolete from VER2 and on * @beacon_template: beacon template ID @@ -162,9 +162,9 @@ struct iwl_mac_data_ap { __le32 beacon_time; __le64 beacon_tsf; __le32 bi; - __le32 bi_reciprocal; + __le32 reserved1; __le32 dtim_interval; - __le32 dtim_reciprocal; + __le32 reserved2; __le32 mcast_qid; __le32 beacon_template; } __packed; /* AP_MAC_DATA_API_S_VER_2 */ @@ -174,26 +174,34 @@ struct iwl_mac_data_ap { * @beacon_time: beacon transmit time in system time * @beacon_tsf: beacon transmit time in TSF * @bi: beacon interval in TU - * @bi_reciprocal: 2^32 / bi + * @reserved: reserved * @beacon_template: beacon template ID */ struct iwl_mac_data_ibss { __le32 beacon_time; __le64 beacon_tsf; __le32 bi; - __le32 bi_reciprocal; + __le32 reserved; __le32 beacon_template; } __packed; /* IBSS_MAC_DATA_API_S_VER_1 */ /** + * enum iwl_mac_data_policy - policy of the data path for this MAC + * @TWT_SUPPORTED: twt is supported + */ +enum iwl_mac_data_policy { + TWT_SUPPORTED = BIT(0), +}; + +/** * struct iwl_mac_data_sta - configuration data for station MAC context * @is_assoc: 1 for associated state, 0 otherwise * @dtim_time: DTIM arrival time in system time * @dtim_tsf: DTIM arrival time in TSF * @bi: beacon interval in TU, applicable only when associated - * @bi_reciprocal: 2^32 / bi , applicable only when associated + * @reserved1: reserved * @dtim_interval: DTIM interval in TU, applicable only when associated - * @dtim_reciprocal: 2^32 / dtim_interval , applicable only when associated + * @data_policy: see &enum iwl_mac_data_policy * @listen_interval: in beacon intervals, applicable only when associated * @assoc_id: unique ID assigned by the AP during association * @assoc_beacon_arrive_time: TSF of first beacon after association @@ -203,13 +211,13 @@ struct iwl_mac_data_sta { __le32 dtim_time; __le64 dtim_tsf; __le32 bi; - __le32 bi_reciprocal; + __le32 reserved1; __le32 dtim_interval; - __le32 dtim_reciprocal; + __le32 data_policy; __le32 listen_interval; __le32 assoc_id; __le32 assoc_beacon_arrive_time; -} __packed; /* STA_MAC_DATA_API_S_VER_1 */ +} __packed; /* STA_MAC_DATA_API_S_VER_2 */ /** * struct iwl_mac_data_go - configuration data for P2P GO MAC context @@ -233,7 +241,7 @@ struct iwl_mac_data_go { struct iwl_mac_data_p2p_sta { struct iwl_mac_data_sta sta; __le32 ctwin; -} __packed; /* P2P_STA_MAC_DATA_API_S_VER_1 */ +} __packed; /* P2P_STA_MAC_DATA_API_S_VER_2 */ /** * struct 
iwl_mac_data_pibss - Pseudo IBSS config data @@ -378,13 +386,6 @@ struct iwl_mac_ctx_cmd { }; } __packed; /* MAC_CONTEXT_CMD_API_S_VER_1 */ -static inline u32 iwl_mvm_reciprocal(u32 v) -{ - if (!v) - return 0; - return 0xFFFFFFFF / v; -} - #define IWL_NONQOS_SEQ_GET 0x1 #define IWL_NONQOS_SEQ_SET 0x2 struct iwl_nonqos_seq_query_cmd { @@ -442,7 +443,7 @@ struct iwl_he_backoff_conf { * Support for Nss x BW (or RU) matrix: * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) * Each entry contains 2 QAM thresholds for 8us and 16us: - * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES + * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6=RES, 7=NONE * i.e. QAM_th1 < QAM_th2 such if TX uses QAM_tx: * QAM_tx < QAM_th1 --> PPE=0us * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 0537496b6eb1..0791a854fc8f 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -345,66 +345,98 @@ enum iwl_rx_mpdu_mac_info { IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, }; -/* - * enum iwl_rx_he_phy - HE PHY data - */ -enum iwl_rx_he_phy { - IWL_RX_HE_PHY_BEAM_CHNG = BIT(0), - IWL_RX_HE_PHY_UPLINK = BIT(1), - IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc, - IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00, - IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12), - IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000, - IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20), - IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000, - IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23), - IWL_RX_HE_PHY_DOPPLER = BIT(24), +/* TSF overload low dword */ +enum iwl_rx_phy_data0 { + /* info type: HE any */ + IWL_RX_PHY_DATA0_HE_BEAM_CHNG = 0x00000001, + IWL_RX_PHY_DATA0_HE_UPLINK = 0x00000002, + IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK = 0x000000fc, + IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK = 0x00000f00, + /* 1 bit reserved */ + IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK = 0x000fe000, + IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM = 0x00100000, + IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK = 0x00600000, + IWL_RX_PHY_DATA0_HE_PE_DISAMBIG = 0x00800000, + IWL_RX_PHY_DATA0_HE_DOPPLER = 0x01000000, /* 6 bits reserved */ - IWL_RX_HE_PHY_DELIM_EOF = BIT(31), + IWL_RX_PHY_DATA0_HE_DELIM_EOF = 0x80000000, +}; + +enum iwl_rx_phy_info_type { + IWL_RX_PHY_INFO_TYPE_NONE = 0, + IWL_RX_PHY_INFO_TYPE_CCK = 1, + IWL_RX_PHY_INFO_TYPE_OFDM_LGCY = 2, + IWL_RX_PHY_INFO_TYPE_HT = 3, + IWL_RX_PHY_INFO_TYPE_VHT_SU = 4, + IWL_RX_PHY_INFO_TYPE_VHT_MU = 5, + IWL_RX_PHY_INFO_TYPE_HE_SU = 6, + IWL_RX_PHY_INFO_TYPE_HE_MU = 7, + IWL_RX_PHY_INFO_TYPE_HE_TB = 8, + IWL_RX_PHY_INFO_TYPE_HE_MU_EXT = 9, + IWL_RX_PHY_INFO_TYPE_HE_TB_EXT = 10, +}; - /* second dword - common data */ - IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL, - IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8), +/* TSF overload high dword */ +enum iwl_rx_phy_data1 { + /* + * check this first - if TSF overload is set, + * see &enum iwl_rx_phy_info_type + */ + IWL_RX_PHY_DATA1_INFO_TYPE_MASK = 0xf0000000, + + /* info type: HT/VHT/HE any */ + IWL_RX_PHY_DATA1_LSIG_LEN_MASK = 0x0fff0000, + + /* info type: HE MU/MU-EXT */ + IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION = 0x00000001, + IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x0000001e, + + /* info type: HE any */ + IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK = 0x000000e0, + IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80 = 0x00000100, /* trigger encoded */ - IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL, - IWL_RX_HE_PHY_INFO_TYPE_MASK = 0xf000000000000000ULL, - IWL_RX_HE_PHY_INFO_TYPE_SU = 0x0, /* TSF low 
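The mac.h hunks above retire the precomputed 2^32/bi reciprocals: the firmware no longer consumes them, so bi_reciprocal and dtim_reciprocal become reserved fields and the iwl_mvm_reciprocal() helper goes away. A userspace copy of the deleted helper, kept only to document what the now-reserved fields used to carry:

#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal(uint32_t v)
{
	return v ? 0xFFFFFFFFu / v : 0;
}

int main(void)
{
	/* beacon interval of 100 TU -> the value bi_reciprocal used to hold */
	printf("0x%08x\n", reciprocal(100));
	return 0;
}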
valid (first DW) */ - IWL_RX_HE_PHY_INFO_TYPE_MU = 0x1, /* TSF low/high valid (both DWs) */ - IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO = 0x2, /* same + SIGB-common0/1/2 valid */ - IWL_RX_HE_PHY_INFO_TYPE_TB = 0x3, /* TSF low/high valid (both DWs) */ - - /* second dword - MU data */ - IWL_RX_HE_PHY_MU_SIGB_COMPRESSION = BIT_ULL(32 + 0), - IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL, - IWL_RX_HE_PHY_MU_SIGB_MCS_MASK = 0xf000000000000ULL, - IWL_RX_HE_PHY_MU_SIGB_DCM = BIT_ULL(32 + 21), - IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL, - - /* second dword - TB data */ - IWL_RX_HE_PHY_TB_PILOT_TYPE = BIT_ULL(32 + 0), - IWL_RX_HE_PHY_TB_LOW_SS_MASK = 0xe00000000ULL + IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK = 0x0000fe00, + + /* info type: HE TB/TX-EXT */ + IWL_RX_PHY_DATA1_HE_TB_PILOT_TYPE = 0x00000001, + IWL_RX_PHY_DATA1_HE_TB_LOW_SS_MASK = 0x0000000e, }; -enum iwl_rx_he_sigb_common0 { +/* goes into Metadata DW 7 */ +enum iwl_rx_phy_data2 { + /* info type: HE MU-EXT */ /* the a1/a2/... is what the PHY/firmware calls the values */ - IWL_RX_HE_SIGB_COMMON0_CH1_RU0 = 0x000000ff, /* a1 */ - IWL_RX_HE_SIGB_COMMON0_CH1_RU2 = 0x0000ff00, /* a2 */ - IWL_RX_HE_SIGB_COMMON0_CH2_RU0 = 0x00ff0000, /* b1 */ - IWL_RX_HE_SIGB_COMMON0_CH2_RU2 = 0xff000000, /* b2 */ + IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0 = 0x000000ff, /* a1 */ + IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2 = 0x0000ff00, /* a2 */ + IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0 = 0x00ff0000, /* b1 */ + IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2 = 0xff000000, /* b2 */ + + /* info type: HE TB-EXT */ + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1 = 0x0000000f, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2 = 0x000000f0, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3 = 0x00000f00, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4 = 0x0000f000, }; -enum iwl_rx_he_sigb_common1 { - IWL_RX_HE_SIGB_COMMON1_CH1_RU1 = 0x000000ff, /* c1 */ - IWL_RX_HE_SIGB_COMMON1_CH1_RU3 = 0x0000ff00, /* c2 */ - IWL_RX_HE_SIGB_COMMON1_CH2_RU1 = 0x00ff0000, /* d1 */ - IWL_RX_HE_SIGB_COMMON1_CH2_RU3 = 0xff000000, /* d2 */ +/* goes into Metadata DW 8 */ +enum iwl_rx_phy_data3 { + /* info type: HE MU-EXT */ + IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1 = 0x000000ff, /* c1 */ + IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3 = 0x0000ff00, /* c2 */ + IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1 = 0x00ff0000, /* d1 */ + IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3 = 0xff000000, /* d2 */ }; -enum iwl_rx_he_sigb_common2 { - IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU = 0x0001, - IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU = 0x0002, - IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK = 0x0004, - IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK = 0x0008, +/* goes into Metadata DW 4 high 16 bits */ +enum iwl_rx_phy_data4 { + /* info type: HE MU-EXT */ + IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU = 0x0001, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU = 0x0002, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK = 0x0004, + IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK = 0x0008, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK = 0x00f0, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM = 0x0100, + IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK = 0x0600, }; /** @@ -419,9 +451,9 @@ struct iwl_rx_mpdu_desc_v1 { __le32 rss_hash; /** - * @sigb_common0: for HE sniffer, HE-SIG-B common part 0 + * @phy_data2: depends on info type (see @phy_data1) */ - __le32 sigb_common0; + __le32 phy_data2; }; /* DW8 - carries filter_match only when rpa_en == 1 */ @@ -432,9 +464,9 @@ struct iwl_rx_mpdu_desc_v1 { __le32 filter_match; /** - * @sigb_common1: for HE sniffer, HE-SIG-B common part 1 + * @phy_data3: depends on info type (see @phy_data1) */ - __le32 sigb_common1; + __le32 
phy_data3; }; /* DW9 */ @@ -472,12 +504,19 @@ struct iwl_rx_mpdu_desc_v1 { * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set */ __le64 tsf_on_air_rise; - /** - * @he_phy_data: - * HE PHY data, see &enum iwl_rx_he_phy, valid - * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set - */ - __le64 he_phy_data; + + struct { + /** + * @phy_data0: depends on info_type, see @phy_data1 + */ + __le32 phy_data0; + /** + * @phy_data1: valid only if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, + * see &enum iwl_rx_phy_data1. + */ + __le32 phy_data1; + }; }; } __packed; @@ -493,9 +532,9 @@ struct iwl_rx_mpdu_desc_v3 { __le32 filter_match; /** - * @sigb_common0: for HE sniffer, HE-SIG-B common part 0 + * @phy_data2: depends on info type (see @phy_data1) */ - __le32 sigb_common0; + __le32 phy_data2; }; /* DW8 - carries rss_hash only when rpa_en == 1 */ @@ -506,9 +545,9 @@ struct iwl_rx_mpdu_desc_v3 { __le32 rss_hash; /** - * @sigb_common1: for HE sniffer, HE-SIG-B common part 1 + * @phy_data3: depends on info type (see @phy_data1) */ - __le32 sigb_common1; + __le32 phy_data3; }; /* DW9 */ /** @@ -556,12 +595,19 @@ struct iwl_rx_mpdu_desc_v3 { * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set */ __le64 tsf_on_air_rise; - /** - * @he_phy_data: - * HE PHY data, see &enum iwl_rx_he_phy, valid - * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set - */ - __le64 he_phy_data; + + struct { + /** + * @phy_data0: depends on info_type, see @phy_data1 + */ + __le32 phy_data0; + /** + * @phy_data1: valid only if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set, + * see &enum iwl_rx_phy_data1. + */ + __le32 phy_data1; + }; }; /* DW16 & DW17 */ /** @@ -613,9 +659,9 @@ struct iwl_rx_mpdu_desc { __le16 l3l4_flags; /** - * @sigb_common2: for HE sniffer, HE-SIG-B common part 2 + * @phy_data4: depends on info type, see phy_data1 */ - __le16 sigb_common2; + __le16 phy_data4; }; /* DW5 */ /** @@ -651,6 +697,55 @@ struct iwl_rx_mpdu_desc { #define IWL_CD_STTS_WIFI_STATUS_POS 4 #define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 +#define RX_NO_DATA_CHAIN_A_POS 0 +#define RX_NO_DATA_CHAIN_A_MSK (0xff << RX_NO_DATA_CHAIN_A_POS) +#define RX_NO_DATA_CHAIN_B_POS 8 +#define RX_NO_DATA_CHAIN_B_MSK (0xff << RX_NO_DATA_CHAIN_B_POS) +#define RX_NO_DATA_CHANNEL_POS 16 +#define RX_NO_DATA_CHANNEL_MSK (0xff << RX_NO_DATA_CHANNEL_POS) + +#define RX_NO_DATA_INFO_TYPE_POS 0 +#define RX_NO_DATA_INFO_TYPE_MSK (0xff << RX_NO_DATA_INFO_TYPE_POS) +#define RX_NO_DATA_INFO_TYPE_NONE 0 +#define RX_NO_DATA_INFO_TYPE_RX_ERR 1 +#define RX_NO_DATA_INFO_TYPE_NDP 2 +#define RX_NO_DATA_INFO_TYPE_MU_UNMATCHED 3 +#define RX_NO_DATA_INFO_TYPE_HE_TB_UNMATCHED 4 + +#define RX_NO_DATA_INFO_ERR_POS 8 +#define RX_NO_DATA_INFO_ERR_MSK (0xff << RX_NO_DATA_INFO_ERR_POS) +#define RX_NO_DATA_INFO_ERR_NONE 0 +#define RX_NO_DATA_INFO_ERR_BAD_PLCP 1 +#define RX_NO_DATA_INFO_ERR_UNSUPPORTED_RATE 2 +#define RX_NO_DATA_INFO_ERR_NO_DELIM 3 +#define RX_NO_DATA_INFO_ERR_BAD_MAC_HDR 4 + +#define RX_NO_DATA_FRAME_TIME_POS 0 +#define RX_NO_DATA_FRAME_TIME_MSK (0xfffff << RX_NO_DATA_FRAME_TIME_POS) + +/** + * struct iwl_rx_no_data - RX no data descriptor + * @info: 7:0 frame type, 15:8 RX error type + * @rssi: 7:0 energy chain-A, + * 15:8 chain-B, measured at FINA time (FINA_ENERGY), 16:23 channel + * @on_air_rise_time: GP2 during on air rise + * @fr_time: frame time + * @rate: rate/mcs of frame + * @phy_info: &enum iwl_rx_phy_data0 and &enum iwl_rx_phy_info_type + * @rx_vec: DW-12:9 raw RX vectors from DSP according to modulation type. 
+ * for VHT: OFDM_RX_VECTOR_SIGA1_OUT, OFDM_RX_VECTOR_SIGA2_OUT + * for HE: OFDM_RX_VECTOR_HE_SIGA1_OUT, OFDM_RX_VECTOR_HE_SIGA2_OUT + */ +struct iwl_rx_no_data { + __le32 info; + __le32 rssi; + __le32 on_air_rise_time; + __le32 fr_time; + __le32 rate; + __le32 phy_info[2]; + __le32 rx_vec[3]; +} __packed; /* RX_NO_DATA_NTFY_API_S_VER_1 */ + /** * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) * @IWL_CD_STTS_UNUSED: unused diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index c16757051f16..2a19b178c5e8 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -225,22 +225,18 @@ static void iwl_fwrt_dump_txf(struct iwl_fw_runtime *fwrt, *dump_data = iwl_fw_error_next_data(*dump_data); } -static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, - struct iwl_fw_error_dump_data **dump_data) +static void iwl_fw_dump_rxf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) { - struct iwl_fw_error_dump_fifo *fifo_hdr; struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; - u32 *fifo_data; - u32 fifo_len; unsigned long flags; - int i, j; - IWL_DEBUG_INFO(fwrt, "WRT FIFO dump\n"); + IWL_DEBUG_INFO(fwrt, "WRT RX FIFO dump\n"); if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) { /* Pull RXF1 */ iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); @@ -254,7 +250,25 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, LMAC2_PRPH_OFFSET, 2); } - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + iwl_trans_release_nic_access(fwrt->trans, &flags); +} + +static void iwl_fw_dump_txf(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data) +{ + struct iwl_fw_error_dump_fifo *fifo_hdr; + struct iwl_fwrt_shared_mem_cfg *cfg = &fwrt->smem_cfg; + u32 *fifo_data; + u32 fifo_len; + unsigned long flags; + int i, j; + + IWL_DEBUG_INFO(fwrt, "WRT TX FIFO dump\n"); + + if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) + return; + + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) { /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ @@ -279,7 +293,7 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, } } - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ @@ -591,20 +605,42 @@ static void iwl_fw_dump_mem(struct iwl_fw_runtime *fwrt, IWL_DEBUG_INFO(fwrt, "WRT memory dump. 
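/*
 * A minimal sketch (not part of this patch; the helper name is made up)
 * of how the RX_NO_DATA_* position/mask pairs above are meant to be
 * applied to a received no-data notification:
 */
static void iwl_rx_no_data_print(const struct iwl_rx_no_data *desc)
{
	u32 info = le32_to_cpu(desc->info);
	u32 rssi = le32_to_cpu(desc->rssi);
	u8 type = (info & RX_NO_DATA_INFO_TYPE_MSK) >> RX_NO_DATA_INFO_TYPE_POS;
	u8 err = (info & RX_NO_DATA_INFO_ERR_MSK) >> RX_NO_DATA_INFO_ERR_POS;
	u8 chain_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS;
	u8 chain_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS;
	u8 channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS;

	pr_debug("no-data ntfy: type %u err %u energy %u/%u channel %u\n",
		 type, err, chain_a, chain_b, channel);
}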
Type=%u\n", dump_mem->type); } +static void iwl_fw_dump_named_mem(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **dump_data, + u32 len, u32 ofs, u8 *name, u8 name_len) +{ + struct iwl_fw_error_dump_named_mem *dump_mem; + + if (!len) + return; + + (*dump_data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); + (*dump_data)->len = cpu_to_le32(len + sizeof(*dump_mem)); + dump_mem = (void *)(*dump_data)->data; + dump_mem->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_NAMED_MEM); + dump_mem->offset = cpu_to_le32(ofs); + dump_mem->name_len = name_len; + memcpy(dump_mem->name, name, name_len); + iwl_trans_read_mem_bytes(fwrt->trans, ofs, dump_mem->data, len); + *dump_data = iwl_fw_error_next_data(*dump_data); + + IWL_DEBUG_INFO(fwrt, "WRT memory dump. Type=%u\n", dump_mem->type); +} + #define ADD_LEN(len, item_len, const_len) \ do {size_t item = item_len; len += (!!item) * const_len + item; } \ while (0) -static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt, - struct iwl_fwrt_shared_mem_cfg *mem_cfg) +static int iwl_fw_rxf_len(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_shared_mem_cfg *mem_cfg) { size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + sizeof(struct iwl_fw_error_dump_fifo); u32 fifo_len = 0; int i; - if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF))) - goto dump_txf; + if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RXF)) + return 0; /* Count RXF2 size */ ADD_LEN(fifo_len, mem_cfg->rxfifo2_size, hdr_len); @@ -613,8 +649,18 @@ static int iwl_fw_fifo_len(struct iwl_fw_runtime *fwrt, for (i = 0; i < mem_cfg->num_lmacs; i++) ADD_LEN(fifo_len, mem_cfg->lmac[i].rxfifo1_size, hdr_len); -dump_txf: - if (!(fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF))) + return fifo_len; +} + +static int iwl_fw_txf_len(struct iwl_fw_runtime *fwrt, + struct iwl_fwrt_shared_mem_cfg *mem_cfg) +{ + size_t hdr_len = sizeof(struct iwl_fw_error_dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + u32 fifo_len = 0; + int i; + + if (!iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_TXF)) goto dump_internal_txf; /* Count TXF sizes */ @@ -627,7 +673,7 @@ dump_txf: } dump_internal_txf: - if (!((fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && + if (!(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_INTERNAL_TXF) && fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))) goto out; @@ -639,6 +685,32 @@ out: return fifo_len; } +static void iwl_dump_paging(struct iwl_fw_runtime *fwrt, + struct iwl_fw_error_dump_data **data) +{ + int i; + + IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); + for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { + struct iwl_fw_error_dump_paging *paging; + struct page *pages = + fwrt->fw_paging_db[i].fw_paging_block; + dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); + (*data)->len = cpu_to_le32(sizeof(*paging) + + PAGING_BLOCK_SIZE); + paging = (void *)(*data)->data; + paging->index = cpu_to_le32(i); + dma_sync_single_for_cpu(fwrt->trans->dev, addr, + PAGING_BLOCK_SIZE, + DMA_BIDIRECTIONAL); + memcpy(paging->data, page_address(pages), + PAGING_BLOCK_SIZE); + (*data) = iwl_fw_error_next_data(*data); + } +} + static struct iwl_fw_error_dump_file * _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, struct iwl_fw_dump_ptrs *fw_error_dump) @@ -655,13 +727,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, u32 smem_len = fwrt->fw->dbg.n_mem_tlv ? 0 : fwrt->trans->cfg->smem_len; u32 sram2_len = fwrt->fw->dbg.n_mem_tlv ? 
0 : fwrt->trans->cfg->dccm2_len; - bool monitor_dump_only = false; int i; - if (fwrt->dump.trig && - fwrt->dump.trig->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY) - monitor_dump_only = true; - /* SRAM - include stack CCM if driver knows the values for it */ if (!fwrt->trans->cfg->dccm_offset || !fwrt->trans->cfg->dccm_len) { const struct fw_img *img; @@ -676,26 +743,27 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, /* reading RXF/TXF sizes */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { - fifo_len = iwl_fw_fifo_len(fwrt, mem_cfg); + fifo_len = iwl_fw_rxf_len(fwrt, mem_cfg); + fifo_len += iwl_fw_txf_len(fwrt, mem_cfg); /* Make room for PRPH registers */ if (!fwrt->trans->cfg->gen2 && - fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) + iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PRPH)) prph_len += iwl_fw_get_prph_len(fwrt); if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && - fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) + iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + fifo_len + prph_len + radio_len; - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) file_len += sizeof(*dump_data) + sizeof(*dump_info); - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { size_t hdr_len = sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_mem); @@ -712,10 +780,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, } /* Make room for fw's virtual image pages, if it exists */ - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && - !fwrt->trans->cfg->gen2 && - fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && - fwrt->fw_paging_db[0].fw_paging_block) + if (iwl_fw_dbg_is_paging_enabled(fwrt)) file_len += fwrt->num_of_paging_blk * (sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -727,12 +792,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, } /* If we only want a monitor dump, reset the file length */ - if (monitor_dump_only) { + if (fwrt->dump.monitor_only) { file_len = sizeof(*dump_file) + sizeof(*dump_data) * 2 + sizeof(*dump_info) + sizeof(*dump_smem_cfg); } - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; @@ -746,7 +811,7 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_info)); dump_info = (void *)dump_data->data; @@ -763,11 +828,12 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, sizeof(dump_info->dev_human_readable) - 1); strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, sizeof(dump_info->bus_human_readable) - 1); + dump_info->rt_status = cpu_to_le32(fwrt->dump.rt_status); dump_data = iwl_fw_error_next_data(dump_data); } - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { + if 
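/*
 * Sketch of the section-writer pattern used throughout
 * _iwl_fw_error_dump(): each section fills one type/len header plus
 * payload, then advances the cursor. write_dump_section() is a
 * hypothetical helper, not a driver function.
 */
static void write_dump_section(struct iwl_fw_error_dump_data **dump_data,
			       u32 type, const void *payload, u32 len)
{
	(*dump_data)->type = cpu_to_le32(type);
	(*dump_data)->len = cpu_to_le32(len);
	memcpy((*dump_data)->data, payload, len);
	*dump_data = iwl_fw_error_next_data(*dump_data);
}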
(iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM_CFG)) { /* Dump shared memory configuration */ dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); @@ -799,12 +865,13 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, /* We only dump the FIFOs if the FW is in error state */ if (fifo_len) { - iwl_fw_dump_fifos(fwrt, &dump_data); + iwl_fw_dump_rxf(fwrt, &dump_data); + iwl_fw_dump_txf(fwrt, &dump_data); if (radio_len) iwl_read_radio_regs(fwrt, &dump_data); } - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_ERROR_INFO) && fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + @@ -817,10 +884,10 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, } /* In case we only want monitor dump, skip to dump transport data */ - if (monitor_dump_only) + if (fwrt->dump.monitor_only) goto out; - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + if (iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_MEM)) { const struct iwl_fw_dbg_mem_seg_tlv *fw_dbg_mem = fwrt->fw->dbg.mem_tlv; @@ -865,30 +932,8 @@ _iwl_fw_error_dump(struct iwl_fw_runtime *fwrt, } /* Dump fw's virtual image */ - if (fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && - !fwrt->trans->cfg->gen2 && - fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && - fwrt->fw_paging_db[0].fw_paging_block) { - IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); - for (i = 1; i < fwrt->num_of_paging_blk + 1; i++) { - struct iwl_fw_error_dump_paging *paging; - struct page *pages = - fwrt->fw_paging_db[i].fw_paging_block; - dma_addr_t addr = fwrt->fw_paging_db[i].fw_paging_phys; - - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING); - dump_data->len = cpu_to_le32(sizeof(*paging) + - PAGING_BLOCK_SIZE); - paging = (void *)dump_data->data; - paging->index = cpu_to_le32(i); - dma_sync_single_for_cpu(fwrt->trans->dev, addr, - PAGING_BLOCK_SIZE, - DMA_BIDIRECTIONAL); - memcpy(paging->data, page_address(pages), - PAGING_BLOCK_SIZE); - dump_data = iwl_fw_error_next_data(dump_data); - } - } + if (iwl_fw_dbg_is_paging_enabled(fwrt)) + iwl_dump_paging(fwrt, &dump_data); if (prph_len) { iwl_dump_prph(fwrt->trans, &dump_data, @@ -906,12 +951,245 @@ out: return dump_file; } +static void iwl_dump_prph_ini(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + struct iwl_fw_ini_region_cfg *reg) +{ + struct iwl_fw_error_dump_prph *prph; + unsigned long flags; + u32 i, size = le32_to_cpu(reg->num_regions); + + IWL_DEBUG_INFO(trans, "WRT PRPH dump\n"); + + if (!iwl_trans_grab_nic_access(trans, &flags)) + return; + + for (i = 0; i < size; i++) { + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); + (*data)->len = cpu_to_le32(le32_to_cpu(reg->size) + + sizeof(*prph)); + prph = (void *)(*data)->data; + prph->prph_start = reg->start_addr[i]; + prph->data[0] = cpu_to_le32(iwl_read_prph_no_grab(trans, + le32_to_cpu(prph->prph_start))); + *data = iwl_fw_error_next_data(*data); + } + iwl_trans_release_nic_access(trans, &flags); +} + +static void iwl_dump_csr_ini(struct iwl_trans *trans, + struct iwl_fw_error_dump_data **data, + struct iwl_fw_ini_region_cfg *reg) +{ + int i, num = le32_to_cpu(reg->num_regions); + u32 size = le32_to_cpu(reg->size); + + IWL_DEBUG_INFO(trans, "WRT CSR dump\n"); + + for (i = 0; i < num; i++) { + u32 add = le32_to_cpu(reg->start_addr[i]); + __le32 *val; + int j; + + (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR); + (*data)->len = 
cpu_to_le32(size); + val = (void *)(*data)->data; + + for (j = 0; j < size; j += 4) + *val++ = cpu_to_le32(iwl_trans_read32(trans, j + add)); + + *data = iwl_fw_error_next_data(*data); + } +} + +static int iwl_fw_ini_get_trigger_len(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_trigger *trigger) +{ + int i, num, size = 0, hdr_len = sizeof(struct iwl_fw_error_dump_data); + + if (!trigger || !trigger->num_regions) + return 0; + + num = le32_to_cpu(trigger->num_regions); + for (i = 0; i < num; i++) { + u32 reg_id = le32_to_cpu(trigger->data[i]); + struct iwl_fw_ini_region_cfg *reg; + enum iwl_fw_ini_region_type type; + u32 num_entries; + + if (WARN_ON(reg_id >= ARRAY_SIZE(fwrt->dump.active_regs))) + continue; + + reg = fwrt->dump.active_regs[reg_id].reg; + if (WARN(!reg, "Unassigned region %d\n", reg_id)) + continue; + + type = le32_to_cpu(reg->region_type); + num_entries = le32_to_cpu(reg->num_regions); + + switch (type) { + case IWL_FW_INI_REGION_DEVICE_MEMORY: + size += hdr_len + + sizeof(struct iwl_fw_error_dump_named_mem) + + le32_to_cpu(reg->size); + break; + case IWL_FW_INI_REGION_PERIPHERY_MAC: + case IWL_FW_INI_REGION_PERIPHERY_PHY: + case IWL_FW_INI_REGION_PERIPHERY_AUX: + size += num_entries * + (hdr_len + + sizeof(struct iwl_fw_error_dump_prph) + + sizeof(u32)); + break; + case IWL_FW_INI_REGION_TXF: + size += iwl_fw_txf_len(fwrt, &fwrt->smem_cfg); + break; + case IWL_FW_INI_REGION_RXF: + size += iwl_fw_rxf_len(fwrt, &fwrt->smem_cfg); + break; + case IWL_FW_INI_REGION_PAGING: + if (!iwl_fw_dbg_is_paging_enabled(fwrt)) + break; + size += fwrt->num_of_paging_blk * + (hdr_len + + sizeof(struct iwl_fw_error_dump_paging) + + PAGING_BLOCK_SIZE); + break; + case IWL_FW_INI_REGION_CSR: + size += num_entries * + (hdr_len + le32_to_cpu(reg->size)); + break; + case IWL_FW_INI_REGION_DRAM_BUFFER: + /* Transport takes care of DRAM dumping */ + case IWL_FW_INI_REGION_INTERNAL_BUFFER: + case IWL_FW_INI_REGION_DRAM_IMR: + /* Undefined yet */ + default: + break; + } + } + return size; +} + +static void iwl_fw_ini_dump_trigger(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_trigger *trigger, + struct iwl_fw_error_dump_data **data, + u32 *dump_mask) +{ + int i, num = le32_to_cpu(trigger->num_regions); + + for (i = 0; i < num; i++) { + u32 reg_id = le32_to_cpu(trigger->data[i]); + enum iwl_fw_ini_region_type type; + struct iwl_fw_ini_region_cfg *reg; + + if (reg_id >= ARRAY_SIZE(fwrt->dump.active_regs)) + continue; + + reg = fwrt->dump.active_regs[reg_id].reg; + /* Don't warn, get_trigger_len already warned */ + if (!reg) + continue; + + type = le32_to_cpu(reg->region_type); + switch (type) { + case IWL_FW_INI_REGION_DEVICE_MEMORY: + if (WARN_ON(le32_to_cpu(reg->num_regions) > 1)) + continue; + iwl_fw_dump_named_mem(fwrt, data, + le32_to_cpu(reg->size), + le32_to_cpu(reg->start_addr[0]), + reg->name, + le32_to_cpu(reg->name_len)); + break; + case IWL_FW_INI_REGION_PERIPHERY_MAC: + case IWL_FW_INI_REGION_PERIPHERY_PHY: + case IWL_FW_INI_REGION_PERIPHERY_AUX: + iwl_dump_prph_ini(fwrt->trans, data, reg); + break; + case IWL_FW_INI_REGION_DRAM_BUFFER: + *dump_mask |= IWL_FW_ERROR_DUMP_FW_MONITOR; + break; + case IWL_FW_INI_REGION_PAGING: + if (iwl_fw_dbg_is_paging_enabled(fwrt)) + iwl_dump_paging(fwrt, data); + else + *dump_mask |= IWL_FW_ERROR_DUMP_PAGING; + break; + case IWL_FW_INI_REGION_TXF: + iwl_fw_dump_txf(fwrt, data); + break; + case IWL_FW_INI_REGION_RXF: + iwl_fw_dump_rxf(fwrt, data); + break; + case IWL_FW_INI_REGION_CSR: + iwl_dump_csr_ini(fwrt->trans, data, reg); + break; + case 
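/*
 * The accounting in iwl_fw_ini_get_trigger_len() above reserves, for a
 * periphery region, one (header + prph record + one register value) per
 * configured address. E.g. for a hypothetical region with four start
 * addresses:
 */
	u32 hdr_len = sizeof(struct iwl_fw_error_dump_data);
	u32 per_addr = hdr_len + sizeof(struct iwl_fw_error_dump_prph) +
		       sizeof(u32);
	u32 reserved = 4 * per_addr;	/* num_regions == 4 */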
IWL_FW_INI_REGION_DRAM_IMR: + case IWL_FW_INI_REGION_INTERNAL_BUFFER: + /* This is undefined yet */ + default: + break; + } + } +} + +static struct iwl_fw_error_dump_file * +_iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt, + struct iwl_fw_dump_ptrs *fw_error_dump, + u32 *dump_mask) +{ + int size, id = le32_to_cpu(fwrt->dump.desc->trig_desc.type); + struct iwl_fw_error_dump_data *dump_data; + struct iwl_fw_error_dump_file *dump_file; + struct iwl_fw_ini_trigger *trigger, *ext; + + if (id == FW_DBG_TRIGGER_FW_ASSERT) + id = IWL_FW_TRIGGER_ID_FW_ASSERT; + else if (id == FW_DBG_TRIGGER_USER) + id = IWL_FW_TRIGGER_ID_USER_TRIGGER; + else if (id < FW_DBG_TRIGGER_MAX) + return NULL; + + if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) + return NULL; + + trigger = fwrt->dump.active_trigs[id].conf; + ext = fwrt->dump.active_trigs[id].conf_ext; + + size = sizeof(*dump_file); + size += iwl_fw_ini_get_trigger_len(fwrt, trigger); + size += iwl_fw_ini_get_trigger_len(fwrt, ext); + + if (!size) + return NULL; + + dump_file = vzalloc(size); + if (!dump_file) + return NULL; + + fw_error_dump->fwrt_ptr = dump_file; + + dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); + dump_data = (void *)dump_file->data; + dump_file->file_len = cpu_to_le32(size); + + *dump_mask = 0; + if (trigger) + iwl_fw_ini_dump_trigger(fwrt, trigger, &dump_data, dump_mask); + if (ext) + iwl_fw_ini_dump_trigger(fwrt, ext, &dump_data, dump_mask); + + return dump_file; +} + void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) { struct iwl_fw_dump_ptrs *fw_error_dump; struct iwl_fw_error_dump_file *dump_file; struct scatterlist *sg_dump_data; u32 file_len; + u32 dump_mask = fwrt->fw->dbg.dump_mask; IWL_DEBUG_INFO(fwrt, "WRT dump start\n"); @@ -925,14 +1203,21 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (!fw_error_dump) goto out; - dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump); + if (fwrt->trans->ini_valid) + dump_file = _iwl_fw_error_ini_dump(fwrt, fw_error_dump, + &dump_mask); + else + dump_file = _iwl_fw_error_dump(fwrt, fw_error_dump); + if (!dump_file) { kfree(fw_error_dump); goto out; } - fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, - fwrt->dump.trig); + if (!fwrt->trans->ini_valid && fwrt->dump.monitor_only) + dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR; + + fw_error_dump->trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask); file_len = le32_to_cpu(dump_file->file_len); fw_error_dump->fwrt_len = file_len; if (fw_error_dump->trans_ptr) { @@ -973,6 +1258,14 @@ const struct iwl_fw_dump_desc iwl_dump_desc_assert = { }; IWL_EXPORT_SYMBOL(iwl_dump_desc_assert); +void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt) +{ + IWL_INFO(fwrt, "error dump due to fw assert\n"); + fwrt->dump.desc = &iwl_dump_desc_assert; + iwl_fw_error_dump(fwrt); +} +IWL_EXPORT_SYMBOL(iwl_fw_assert_error_dump); + void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt) { struct iwl_fw_dump_desc *iwl_dump_desc_no_alive = @@ -998,7 +1291,8 @@ void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt) IWL_EXPORT_SYMBOL(iwl_fw_alive_error_dump); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, - const struct iwl_fw_dump_desc *desc, void *trigger, + const struct iwl_fw_dump_desc *desc, + bool monitor_only, unsigned int delay) { /* @@ -1028,7 +1322,7 @@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, le32_to_cpu(desc->trig_desc.type)); fwrt->dump.desc = desc; - fwrt->dump.trig = trigger; + fwrt->dump.monitor_only = monitor_only; schedule_delayed_work(&fwrt->dump.wk, delay); @@ -1036,13 +1330,14 
@@ int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc); -int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, - enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger) +int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger) { struct iwl_fw_dump_desc *desc; unsigned int delay = 0; + bool monitor_only = false; if (trigger) { u16 occurrences = le16_to_cpu(trigger->occurrences) - 1; @@ -1059,6 +1354,7 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, trigger->occurrences = cpu_to_le16(occurrences); delay = le16_to_cpu(trigger->trig_dis_ms); + monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY; } desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); @@ -1070,7 +1366,48 @@ int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, desc->trig_desc.type = cpu_to_le32(trig); memcpy(desc->trig_desc.data, str, len); - return iwl_fw_dbg_collect_desc(fwrt, desc, trigger, delay); + return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay); +} +IWL_EXPORT_SYMBOL(_iwl_fw_dbg_collect); + +int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + u32 id, const char *str, size_t len) +{ + struct iwl_fw_dump_desc *desc; + u32 occur, delay; + + if (!fwrt->trans->ini_valid) + return _iwl_fw_dbg_collect(fwrt, id, str, len, NULL); + + if (id == FW_DBG_TRIGGER_USER) + id = IWL_FW_TRIGGER_ID_USER_TRIGGER; + + if (WARN_ON(!fwrt->dump.active_trigs[id].active)) + return -EINVAL; + + delay = le32_to_cpu(fwrt->dump.active_trigs[id].conf->ignore_consec); + occur = le32_to_cpu(fwrt->dump.active_trigs[id].conf->occurrences); + if (!occur) + return 0; + + if (le32_to_cpu(fwrt->dump.active_trigs[id].conf->force_restart)) { + IWL_WARN(fwrt, "Force restart: trigger %d fired.\n", id); + iwl_force_nmi(fwrt->trans); + return 0; + } + + desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC); + if (!desc) + return -ENOMEM; + + occur--; + fwrt->dump.active_trigs[id].conf->occurrences = cpu_to_le32(occur); + + desc->len = len; + desc->trig_desc.type = cpu_to_le32(id); + memcpy(desc->trig_desc.data, str, len); + + return iwl_fw_dbg_collect_desc(fwrt, desc, true, delay); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect); @@ -1081,6 +1418,9 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, int ret, len = 0; char buf[64]; + if (fwrt->trans->ini_valid) + return 0; + if (fmt) { va_list ap; @@ -1097,8 +1437,8 @@ int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, len = strlen(buf) + 1; } - ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, - trigger); + ret = _iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len, + trigger); if (ret) return ret; @@ -1224,3 +1564,217 @@ void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt) cfg->d3_debug_data_length); } IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data); + +static void +iwl_fw_dbg_buffer_allocation(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_allocation_tlv *alloc) +{ + struct iwl_trans *trans = fwrt->trans; + struct iwl_continuous_record_cmd cont_rec = {}; + struct iwl_buffer_allocation_cmd *cmd = (void *)&cont_rec.pad[0]; + struct iwl_host_cmd hcmd = { + .id = LDBG_CONFIG_CMD, + .flags = CMD_ASYNC, + .data[0] = &cont_rec, + .len[0] = sizeof(cont_rec), + }; + void *virtual_addr = NULL; + u32 size = le32_to_cpu(alloc->size); + dma_addr_t phys_addr; + + cont_rec.record_mode.enable_recording = cpu_to_le16(BUFFER_ALLOCATION); + + if (!trans->num_blocks && + 
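/*
 * The occurrences handling in iwl_fw_dbg_collect() above, restated in
 * isolation ("conf" stands for the active trigger configuration; note
 * that iwl_fw_dbg_update_triggers() later in this patch maps a
 * configured zero to -1, i.e. effectively infinite):
 */
	u32 occur = le32_to_cpu(conf->occurrences);

	if (!occur)				/* exhausted, don't collect */
		return 0;
	conf->occurrences = cpu_to_le32(occur - 1);	/* consume one */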
le32_to_cpu(alloc->buffer_location) != + IWL_FW_INI_LOCATION_DRAM_PATH) + return; + + virtual_addr = dma_alloc_coherent(fwrt->trans->dev, size, + &phys_addr, GFP_KERNEL); + + /* TODO: alloc fragments if needed */ + if (!virtual_addr) + IWL_ERR(fwrt, "Failed to allocate debug memory\n"); + + if (WARN_ON_ONCE(trans->num_blocks == ARRAY_SIZE(trans->fw_mon))) + return; + + trans->fw_mon[trans->num_blocks].block = virtual_addr; + trans->fw_mon[trans->num_blocks].physical = phys_addr; + trans->fw_mon[trans->num_blocks].size = size; + trans->num_blocks++; + + IWL_DEBUG_FW(trans, "Allocated debug block of size %d\n", size); + + /* First block is assigned via registers / context info */ + if (trans->num_blocks == 1) + return; + + cmd->num_frags = cpu_to_le32(1); + cmd->fragments[0].address = cpu_to_le64(phys_addr); + cmd->fragments[0].size = alloc->size; + cmd->allocation_id = alloc->allocation_id; + cmd->buffer_location = alloc->buffer_location; + + iwl_trans_send_cmd(trans, &hcmd); +} + +static void iwl_fw_dbg_send_hcmd(struct iwl_fw_runtime *fwrt, + struct iwl_ucode_tlv *tlv) +{ + struct iwl_fw_ini_hcmd_tlv *hcmd_tlv = (void *)&tlv->data[0]; + struct iwl_fw_ini_hcmd *data = &hcmd_tlv->hcmd; + u16 len = le32_to_cpu(tlv->length) - sizeof(*hcmd_tlv); + + struct iwl_host_cmd hcmd = { + .id = WIDE_ID(data->group, data->id), + .len = { len, }, + .data = { data->data, }, + }; + + iwl_trans_send_cmd(fwrt->trans, &hcmd); +} + +static void iwl_fw_dbg_update_regions(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_region_tlv *tlv, + bool ext, enum iwl_fw_ini_apply_point pnt) +{ + void *iter = (void *)tlv->region_config; + int i, size = le32_to_cpu(tlv->num_regions); + + for (i = 0; i < size; i++) { + struct iwl_fw_ini_region_cfg *reg = iter; + int id = le32_to_cpu(reg->region_id); + struct iwl_fw_ini_active_regs *active; + + if (WARN(id >= ARRAY_SIZE(fwrt->dump.active_regs), + "Invalid region id %d for apply point %d\n", id, pnt)) + break; + + active = &fwrt->dump.active_regs[id]; + + if (ext && active->apply_point == pnt) + IWL_WARN(fwrt->trans, + "External region TLV overrides FW default %x\n", + id); + + IWL_DEBUG_FW(fwrt, + "%s: apply point %d, activating region ID %d\n", + __func__, pnt, id); + + active->reg = reg; + active->apply_point = pnt; + + if (le32_to_cpu(reg->region_type) != + IWL_FW_INI_REGION_DRAM_BUFFER) + iter += le32_to_cpu(reg->num_regions) * sizeof(__le32); + + iter += sizeof(*reg); + } +} + +static void iwl_fw_dbg_update_triggers(struct iwl_fw_runtime *fwrt, + struct iwl_fw_ini_trigger_tlv *tlv, + bool ext, + enum iwl_fw_ini_apply_point apply_point) +{ + int i, size = le32_to_cpu(tlv->num_triggers); + void *iter = (void *)tlv->trigger_config; + + for (i = 0; i < size; i++) { + struct iwl_fw_ini_trigger *trig = iter; + struct iwl_fw_ini_active_triggers *active; + int id = le32_to_cpu(trig->trigger_id); + u32 num; + + if (WARN_ON(id >= ARRAY_SIZE(fwrt->dump.active_trigs))) + break; + + active = &fwrt->dump.active_trigs[id]; + + if (active->apply_point != apply_point) { + active->conf = NULL; + active->conf_ext = NULL; + } + + num = le32_to_cpu(trig->num_regions); + + if (ext && active->apply_point == apply_point) { + num += le32_to_cpu(active->conf->num_regions); + if (trig->ignore_default) { + active->conf_ext = active->conf; + active->conf = trig; + } else { + active->conf_ext = trig; + } + } else { + active->conf = trig; + } + + /* Since zero means infinity - just set to -1 */ + if (!le32_to_cpu(trig->occurrences)) + trig->occurrences = cpu_to_le32(-1); + if 
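/*
 * Note that the allocation above only logs a dma_alloc_coherent()
 * failure and carries on; a minimal sketch of the same allocation step
 * with explicit error handling (hypothetical helper, not driver code):
 */
static int iwl_dbg_alloc_block(struct iwl_trans *trans, u32 size,
			       void **virt, dma_addr_t *phys)
{
	*virt = dma_alloc_coherent(trans->dev, size, phys, GFP_KERNEL);
	if (!*virt)
		return -ENOMEM;	/* don't register or advertise the block */
	return 0;
}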
(!le32_to_cpu(trig->ignore_consec)) + trig->ignore_consec = cpu_to_le32(-1); + + iter += sizeof(*trig) + + le32_to_cpu(trig->num_regions) * sizeof(__le32); + + active->active = num; + active->apply_point = apply_point; + } +} + +static void _iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, + struct iwl_apply_point_data *data, + enum iwl_fw_ini_apply_point pnt, + bool ext) +{ + void *iter = data->data; + + while (iter && iter < data->data + data->size) { + struct iwl_ucode_tlv *tlv = iter; + void *ini_tlv = (void *)tlv->data; + u32 type = le32_to_cpu(tlv->type); + + switch (type) { + case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: + iwl_fw_dbg_buffer_allocation(fwrt, ini_tlv); + break; + case IWL_UCODE_TLV_TYPE_HCMD: + if (pnt < IWL_FW_INI_APPLY_AFTER_ALIVE) { + IWL_ERR(fwrt, + "Invalid apply point %x for host command\n", + pnt); + goto next; + } + iwl_fw_dbg_send_hcmd(fwrt, tlv); + break; + case IWL_UCODE_TLV_TYPE_REGIONS: + iwl_fw_dbg_update_regions(fwrt, ini_tlv, ext, pnt); + break; + case IWL_UCODE_TLV_TYPE_TRIGGERS: + iwl_fw_dbg_update_triggers(fwrt, ini_tlv, ext, pnt); + break; + case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: + break; + default: + WARN_ONCE(1, "Invalid TLV %x for apply point\n", type); + break; + } +next: + iter += sizeof(*tlv) + le32_to_cpu(tlv->length); + } +} + +void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, + enum iwl_fw_ini_apply_point apply_point) +{ + void *data = &fwrt->trans->apply_points[apply_point]; + + _iwl_fw_dbg_apply_point(fwrt, data, apply_point, false); + + data = &fwrt->trans->apply_points_ext[apply_point]; + _iwl_fw_dbg_apply_point(fwrt, data, apply_point, true); +} +IWL_EXPORT_SYMBOL(iwl_fw_dbg_apply_point); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h index 6f8d3256f7b0..6aabbdd72326 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.h @@ -72,6 +72,7 @@ #include "file.h" #include "error-dump.h" #include "api/commands.h" +#include "api/dbg-tlv.h" /** * struct iwl_fw_dump_desc - describes the dump @@ -101,17 +102,19 @@ static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt) if (fwrt->dump.desc != &iwl_dump_desc_assert) kfree(fwrt->dump.desc); fwrt->dump.desc = NULL; - fwrt->dump.trig = NULL; + fwrt->dump.rt_status = 0; } void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt); int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt, const struct iwl_fw_dump_desc *desc, - void *trigger, unsigned int delay); + bool monitor_only, unsigned int delay); +int _iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, + enum iwl_fw_dbg_trigger trig, + const char *str, size_t len, + struct iwl_fw_dbg_trigger_tlv *trigger); int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt, - enum iwl_fw_dbg_trigger trig, - const char *str, size_t len, - struct iwl_fw_dbg_trigger_tlv *trigger); + u32 id, const char *str, size_t len); int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_trigger_tlv *trigger, const char *fmt, ...) 
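/*
 * The external-trigger merge rule implemented in
 * iwl_fw_dbg_update_triggers() above, restated in isolation ("trig" is
 * the external TLV, "active" the slot it lands in): an external trigger
 * either overrides the firmware default, keeping the default as the
 * extension, or merely extends it.
 */
	if (trig->ignore_default) {
		active->conf_ext = active->conf;	/* demote default */
		active->conf = trig;			/* external wins */
	} else {
		active->conf_ext = trig;		/* external extends */
	}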
__printf(3, 4); @@ -193,6 +196,9 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, { struct iwl_fw_dbg_trigger_tlv *trig; + if (fwrt->trans->ini_valid) + return NULL; + if (!iwl_fw_dbg_trigger_enabled(fwrt->fw, id)) return NULL; @@ -210,6 +216,37 @@ _iwl_fw_dbg_trigger_on(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_trigger_on((fwrt), (wdev), (id)); \ }) +static inline bool +_iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt, + const enum iwl_fw_dbg_trigger id) +{ + struct iwl_fw_ini_active_triggers *trig = &fwrt->dump.active_trigs[id]; + u32 ms; + + if (!fwrt->trans->ini_valid) + return false; + + if (!trig || !trig->active) + return false; + + ms = le32_to_cpu(trig->conf->ignore_consec); + if (ms) + ms /= USEC_PER_MSEC; + + if (iwl_fw_dbg_no_trig_window(fwrt, id, ms)) { + IWL_WARN(fwrt, "Trigger %d fired in no-collect window\n", id); + return false; + } + + return true; +} + +#define iwl_fw_ini_trigger_on(fwrt, wdev, id) ({ \ + BUILD_BUG_ON(!__builtin_constant_p(id)); \ + BUILD_BUG_ON((id) >= IWL_FW_TRIGGER_ID_NUM); \ + _iwl_fw_ini_trigger_on((fwrt), (id)); \ +}) + static inline void _iwl_fw_dbg_trigger_simple_stop(struct iwl_fw_runtime *fwrt, struct wireless_dev *wdev, @@ -263,6 +300,9 @@ _iwl_fw_dbg_stop_recording(struct iwl_trans *trans, iwl_write_prph(trans, DBGC_IN_SAMPLE, 0); udelay(100); iwl_write_prph(trans, DBGC_OUT_CTRL, 0); +#ifdef CONFIG_IWLWIFI_DEBUGFS + trans->dbg_rec_on = false; +#endif } static inline void @@ -293,6 +333,14 @@ _iwl_fw_dbg_restart_recording(struct iwl_trans *trans, } } +#ifdef CONFIG_IWLWIFI_DEBUGFS +static inline void iwl_fw_set_dbg_rec_on(struct iwl_fw_runtime *fwrt) +{ + if (fwrt->fw->dbg.dest_tlv && fwrt->cur_fw_img == IWL_UCODE_REGULAR) + fwrt->trans->dbg_rec_on = true; +} +#endif + static inline void iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt, struct iwl_fw_dbg_params *params) @@ -301,6 +349,9 @@ iwl_fw_dbg_restart_recording(struct iwl_fw_runtime *fwrt, _iwl_fw_dbg_restart_recording(fwrt->trans, params); else iwl_fw_dbg_start_stop_hcmd(fwrt, true); +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_fw_set_dbg_rec_on(fwrt); +#endif } static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) @@ -310,12 +361,25 @@ static inline void iwl_fw_dump_conf_clear(struct iwl_fw_runtime *fwrt) void iwl_fw_error_dump_wk(struct work_struct *work); +static inline bool iwl_fw_dbg_type_on(struct iwl_fw_runtime *fwrt, u32 type) +{ + return (fwrt->fw->dbg.dump_mask & BIT(type) || fwrt->trans->ini_valid); +} + static inline bool iwl_fw_dbg_is_d3_debug_enabled(struct iwl_fw_runtime *fwrt) { return fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_D3_DEBUG) && fwrt->trans->cfg->d3_debug_data_length && - fwrt->fw->dbg.dump_mask & BIT(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); + iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_D3_DEBUG_DATA); +} + +static inline bool iwl_fw_dbg_is_paging_enabled(struct iwl_fw_runtime *fwrt) +{ + return iwl_fw_dbg_type_on(fwrt, IWL_FW_ERROR_DUMP_PAGING) && + !fwrt->trans->cfg->gen2 && + fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && + fwrt->fw_paging_db[0].fw_paging_block; } void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt); @@ -366,6 +430,10 @@ static inline void iwl_fw_resume_timestamp(struct iwl_fw_runtime *fwrt) {} #endif /* CONFIG_IWLWIFI_DEBUGFS */ +void iwl_fw_assert_error_dump(struct iwl_fw_runtime *fwrt); void iwl_fw_alive_error_dump(struct iwl_fw_runtime *fwrt); void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt); +void iwl_fw_dbg_apply_point(struct iwl_fw_runtime *fwrt, + enum 
iwl_fw_ini_apply_point apply_point); + #endif /* __iwl_fw_dbg_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h index 6fede174c664..65faecf552cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/error-dump.h @@ -187,6 +187,8 @@ enum iwl_fw_error_dump_family { * @fw_human_readable: human readable FW version * @dev_human_readable: name of the device * @bus_human_readable: name of the bus used + * @rt_status: the error_id/rt_status that triggered the latest dump + * if the dump collection was not initiated by an assert, the value is 0 */ struct iwl_fw_error_dump_info { __le32 device_family; @@ -194,6 +196,7 @@ struct iwl_fw_error_dump_info { u8 fw_human_readable[FW_VER_HUMAN_READABLE_SZ]; u8 dev_human_readable[64]; u8 bus_human_readable[8]; + __le32 rt_status; } __packed; /** @@ -249,6 +252,7 @@ struct iwl_fw_error_dump_prph { enum iwl_fw_error_dump_mem_type { IWL_FW_ERROR_DUMP_MEM_SRAM, IWL_FW_ERROR_DUMP_MEM_SMEM, + IWL_FW_ERROR_DUMP_MEM_NAMED_MEM = 10, }; /** @@ -264,6 +268,22 @@ struct iwl_fw_error_dump_mem { }; /** + * struct iwl_fw_error_dump_named_mem - chunk of memory + * @type: &enum iwl_fw_error_dump_mem_type + * @offset: the offset from which the memory was read + * @name_len: name length + * @name: file name + * @data: the content of the memory + */ +struct iwl_fw_error_dump_named_mem { + __le32 type; + __le32 offset; + u8 name_len; + u8 name[32]; + u8 data[]; +}; + +/** * struct iwl_fw_error_dump_rb - content of an Receive Buffer * @index: the index of the Receive Buffer in the Rx queue * @rxq: the RB's Rx queue diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 6005a41c53d1..81f557c0b58d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -91,6 +91,8 @@ struct iwl_ucode_header { } u; }; +#define IWL_UCODE_INI_TLV_GROUP BIT(24) + /* * new TLV uCode file layout * @@ -141,6 +143,11 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, + IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION = IWL_UCODE_INI_TLV_GROUP | 0x1, + IWL_UCODE_TLV_TYPE_HCMD = IWL_UCODE_INI_TLV_GROUP | 0x2, + IWL_UCODE_TLV_TYPE_REGIONS = IWL_UCODE_INI_TLV_GROUP | 0x3, + IWL_UCODE_TLV_TYPE_TRIGGERS = IWL_UCODE_INI_TLV_GROUP | 0x4, + IWL_UCODE_TLV_TYPE_DEBUG_FLOW = IWL_UCODE_INI_TLV_GROUP | 0x5, /* TLVs 0x1000-0x2000 are for internal driver usage */ IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index 54dbbd998abf..12333167ea23 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -65,6 +65,8 @@ #define __iwl_fw_img_h__ #include <linux/types.h> +#include "api/dbg-tlv.h" + #include "file.h" #include "error-dump.h" @@ -221,6 +223,30 @@ struct iwl_fw_dbg { }; /** + * struct iwl_fw_ini_active_triggers + * @active: is this trigger active + * @apply_point: last apply point that updated this trigger + * @conf: active trigger + * @conf_ext: second trigger, contains extra regions to dump + */ +struct iwl_fw_ini_active_triggers { + bool active; + enum iwl_fw_ini_apply_point apply_point; + struct iwl_fw_ini_trigger *conf; + struct iwl_fw_ini_trigger *conf_ext; +}; + +/** + * struct iwl_fw_ini_active_regs + * @reg: active region from TLV + * @apply_point: apply point where it 
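/*
 * The new debug TLV types above are set apart from ordinary ucode TLVs
 * by bit 24 (IWL_UCODE_INI_TLV_GROUP), so a parser can route them with
 * a single mask test. A hypothetical predicate:
 */
static bool iwl_tlv_is_dbg_ini(const struct iwl_ucode_tlv *tlv)
{
	return le32_to_cpu(tlv->type) & IWL_UCODE_INI_TLV_GROUP;
}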
became active + */ +struct iwl_fw_ini_active_regs { + struct iwl_fw_ini_region_cfg *reg; + enum iwl_fw_ini_apply_point apply_point; +}; + +/** * struct iwl_fw - variables associated with the firmware * * @ucode_ver: ucode version from the ucode file diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index 2b8b50a77990..4f7090f88cb0 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -64,6 +64,7 @@ #include "iwl-trans.h" #include "img.h" #include "fw/api/debug.h" +#include "fw/api/dbg-tlv.h" #include "fw/api/paging.h" #include "iwl-eeprom-parse.h" @@ -131,14 +132,17 @@ struct iwl_fw_runtime { /* debug */ struct { const struct iwl_fw_dump_desc *desc; - const struct iwl_fw_dbg_trigger_tlv *trig; + bool monitor_only; struct delayed_work wk; u8 conf; /* ts of the beginning of a non-collect fw dbg data period */ - unsigned long non_collect_ts_start[FW_DBG_TRIGGER_MAX - 1]; + unsigned long non_collect_ts_start[IWL_FW_TRIGGER_ID_NUM - 1]; u32 *d3_debug_data; + struct iwl_fw_ini_active_regs active_regs[IWL_FW_INI_MAX_REGION_ID]; + struct iwl_fw_ini_active_triggers active_trigs[IWL_FW_TRIGGER_ID_NUM]; + u32 rt_status; } dump; #ifdef CONFIG_IWLWIFI_DEBUGFS struct { diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 5eb906a0d0d2..91861a9cbe57 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -265,11 +265,9 @@ struct iwl_tt_params { #define EEPROM_REGULATORY_BAND_NO_HT40 0 /* lower blocks contain EEPROM image and calibration data */ -#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ -#define OTP_LOW_IMAGE_SIZE_FAMILY_7000 (16 * 512 * sizeof(u16)) /* 16 KB */ -#define OTP_LOW_IMAGE_SIZE_FAMILY_8000 (32 * 512 * sizeof(u16)) /* 32 KB */ -#define OTP_LOW_IMAGE_SIZE_FAMILY_9000 OTP_LOW_IMAGE_SIZE_FAMILY_8000 -#define OTP_LOW_IMAGE_SIZE_FAMILY_22000 OTP_LOW_IMAGE_SIZE_FAMILY_9000 +#define OTP_LOW_IMAGE_SIZE_2K (2 * 512 * sizeof(u16)) /* 2 KB */ +#define OTP_LOW_IMAGE_SIZE_16K (16 * 512 * sizeof(u16)) /* 16 KB */ +#define OTP_LOW_IMAGE_SIZE_32K (32 * 512 * sizeof(u16)) /* 32 KB */ struct iwl_eeprom_params { const u8 regulatory_bands[7]; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c new file mode 100644 index 000000000000..43d815cb3ce9 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c @@ -0,0 +1,231 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. 
+ * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include <linux/firmware.h> +#include "iwl-trans.h" +#include "iwl-dbg-tlv.h" + +void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, + bool ext) +{ + struct iwl_apply_point_data *data; + struct iwl_fw_ini_header *header = (void *)&tlv->data[0]; + u32 apply_point = le32_to_cpu(header->apply_point); + + int copy_size = le32_to_cpu(tlv->length) + sizeof(*tlv); + + if (WARN_ONCE(apply_point >= IWL_FW_INI_APPLY_NUM, + "Invalid apply point id %d\n", apply_point)) + return; + + if (ext) + data = &trans->apply_points_ext[apply_point]; + else + data = &trans->apply_points[apply_point]; + + /* + * Make sure we still have room to copy this TLV. Offset points to the + * location the last copy ended. 
+ */ + if (WARN_ONCE(data->offset + copy_size > data->size, + "Not enough memory for apply point %d\n", + apply_point)) + return; + + memcpy(data->data + data->offset, (void *)tlv, copy_size); + data->offset += copy_size; +} + +void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, + bool ext) +{ + struct iwl_ucode_tlv *tlv; + u32 size[IWL_FW_INI_APPLY_NUM] = {0}; + int i; + + while (len >= sizeof(*tlv)) { + u32 tlv_len, tlv_type, apply; + struct iwl_fw_ini_header *hdr; + + len -= sizeof(*tlv); + tlv = (void *)data; + + tlv_len = le32_to_cpu(tlv->length); + tlv_type = le32_to_cpu(tlv->type); + + if (len < tlv_len) + return; + + len -= ALIGN(tlv_len, 4); + data += sizeof(*tlv) + ALIGN(tlv_len, 4); + + if (!(tlv_type & IWL_UCODE_INI_TLV_GROUP)) + continue; + + hdr = (void *)&tlv->data[0]; + apply = le32_to_cpu(hdr->apply_point); + + IWL_DEBUG_FW(trans, "Read TLV %x, apply point %d\n", + le32_to_cpu(tlv->type), apply); + + if (WARN_ON(apply >= IWL_FW_INI_APPLY_NUM)) + continue; + + size[apply] += sizeof(*tlv) + tlv_len; + } + + for (i = 0; i < ARRAY_SIZE(size); i++) { + void *mem; + + if (!size[i]) + continue; + + mem = kzalloc(size[i], GFP_KERNEL); + + if (!mem) { + IWL_ERR(trans, "No memory for apply point %d\n", i); + return; + } + + if (ext) { + trans->apply_points_ext[i].data = mem; + trans->apply_points_ext[i].size = size[i]; + } else { + trans->apply_points[i].data = mem; + trans->apply_points[i].size = size[i]; + } + + trans->ini_valid = true; + } +} + +void iwl_fw_dbg_free(struct iwl_trans *trans) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(trans->apply_points); i++) { + kfree(trans->apply_points[i].data); + trans->apply_points[i].size = 0; + trans->apply_points[i].offset = 0; + + kfree(trans->apply_points_ext[i].data); + trans->apply_points_ext[i].size = 0; + trans->apply_points_ext[i].offset = 0; + } +} + +static int iwl_parse_fw_dbg_tlv(struct iwl_trans *trans, const u8 *data, + size_t len) +{ + struct iwl_ucode_tlv *tlv; + enum iwl_ucode_tlv_type tlv_type; + u32 tlv_len; + + while (len >= sizeof(*tlv)) { + len -= sizeof(*tlv); + tlv = (void *)data; + + tlv_len = le32_to_cpu(tlv->length); + tlv_type = le32_to_cpu(tlv->type); + + if (len < tlv_len) { + IWL_ERR(trans, "invalid TLV len: %zd/%u\n", + len, tlv_len); + return -EINVAL; + } + len -= ALIGN(tlv_len, 4); + data += sizeof(*tlv) + ALIGN(tlv_len, 4); + + switch (tlv_type) { + case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: + case IWL_UCODE_TLV_TYPE_HCMD: + case IWL_UCODE_TLV_TYPE_REGIONS: + case IWL_UCODE_TLV_TYPE_TRIGGERS: + case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: + iwl_fw_dbg_copy_tlv(trans, tlv, true); + break; + default: + WARN_ONCE(1, "Invalid TLV %x\n", tlv_type); + break; + } + } + + return 0; +} + +void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans) +{ + const struct firmware *fw; + int res; + + if (trans->external_ini_loaded || !iwlwifi_mod_params.enable_ini) + return; + + res = request_firmware(&fw, "iwl-dbg-tlv.ini", dev); + if (res) + return; + + iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true); + iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size); + + trans->external_ini_loaded = true; + release_firmware(fw); +} diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h new file mode 100644 index 000000000000..222cd789e07a --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.h @@ -0,0 +1,87 @@ +/****************************************************************************** + * + * This file is provided under a dual 
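/*
 * iwl_alloc_dbg_tlv() sizes each apply point in a first pass and
 * iwl_fw_dbg_copy_tlv() fills it in a second; the same pairing serves
 * the optional external file, which is just a raw stream of the same
 * INI TLVs. Sketch of that flow, assuming dev and trans are in scope:
 */
	const struct firmware *fw;

	if (!request_firmware(&fw, "iwl-dbg-tlv.ini", dev)) {
		iwl_alloc_dbg_tlv(trans, fw->size, fw->data, true);
		iwl_parse_fw_dbg_tlv(trans, fw->data, fw->size);
		release_firmware(fw);
	}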
BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright (C) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. + * + * The full GNU General Public License is included in this distribution + * in the file called COPYING. + * + * Contact Information: + * Intel Linux Wireless <linuxwifi@intel.com> + * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + * + * BSD LICENSE + * + * Copyright (C) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + *****************************************************************************/ +#ifndef __iwl_dbg_tlv_h__ +#define __iwl_dbg_tlv_h__ + +#include <linux/device.h> +#include <linux/types.h> + +/** + * struct iwl_apply_point_data + * @data: start address of this apply point data + * @size: total size of the data + * @offset: current offset of the copied data + */ +struct iwl_apply_point_data { + void *data; + int size; + int offset; +}; + +struct iwl_trans; +void iwl_load_fw_dbg_tlv(struct device *dev, struct iwl_trans *trans); +void iwl_fw_dbg_free(struct iwl_trans *trans); +void iwl_fw_dbg_copy_tlv(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv, + bool ext); +void iwl_alloc_dbg_tlv(struct iwl_trans *trans, size_t len, const u8 *data, + bool ext); + +#endif /* __iwl_dbg_tlv_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index ba41d23b4211..bf1be985f36b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -72,6 +72,7 @@ #include "iwl-op-mode.h" #include "iwl-agn-hw.h" #include "fw/img.h" +#include "iwl-dbg-tlv.h" #include "iwl-config.h" #include "iwl-modparams.h" @@ -645,6 +646,9 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, len -= sizeof(*ucode); + if (iwlwifi_mod_params.enable_ini) + iwl_alloc_dbg_tlv(drv->trans, len, data, false); + while (len >= sizeof(*tlv)) { len -= sizeof(*tlv); tlv = (void *)data; @@ -1086,6 +1090,14 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -ENOMEM; break; } + case IWL_UCODE_TLV_TYPE_BUFFER_ALLOCATION: + case IWL_UCODE_TLV_TYPE_HCMD: + case IWL_UCODE_TLV_TYPE_REGIONS: + case IWL_UCODE_TLV_TYPE_TRIGGERS: + case IWL_UCODE_TLV_TYPE_DEBUG_FLOW: + if (iwlwifi_mod_params.enable_ini) + iwl_fw_dbg_copy_tlv(drv->trans, tlv, false); + break; default: IWL_DEBUG_INFO(drv, "unknown TLV: %d\n", tlv_type); break; @@ -1565,7 +1577,7 @@ struct iwl_drv *iwl_drv_start(struct iwl_trans *trans) if (!drv->dbgfs_drv) { IWL_ERR(drv, "failed to create debugfs directory\n"); ret = -ENOMEM; - goto err_free_drv; + goto err_free_tlv; } /* Create transport layer debugfs dir */ @@ -1590,7 +1602,8 @@ err_fw: #ifdef CONFIG_IWLWIFI_DEBUGFS err_free_dbgfs: debugfs_remove_recursive(drv->dbgfs_drv); -err_free_drv: +err_free_tlv: + iwl_fw_dbg_free(drv->trans); #endif kfree(drv); err: @@ -1616,9 +1629,13 @@ void iwl_drv_stop(struct iwl_drv *drv) mutex_unlock(&iwlwifi_opmode_table_mtx); #ifdef CONFIG_IWLWIFI_DEBUGFS + drv->trans->ops->debugfs_cleanup(drv->trans); + debugfs_remove_recursive(drv->dbgfs_drv); #endif + iwl_fw_dbg_free(drv->trans); + kfree(drv); } @@ -1749,6 +1766,10 @@ MODULE_PARM_DESC(lar_disable, "disable LAR functionality (default: N)"); module_param_named(uapsd_disable, iwlwifi_mod_params.uapsd_disable, uint, 0644); MODULE_PARM_DESC(uapsd_disable, "disable U-APSD functionality bitmap 1: BSS 2: P2P Client (default: 3)"); +module_param_named(enable_ini, iwlwifi_mod_params.enable_ini, + bool, S_IRUGO | S_IWUSR); +MODULE_PARM_DESC(enable_ini, + "Enable debug INI TLV FW debug infrastructure (default: 0)"); /* * set bt_coex_active to true, uCode will do kill/defer diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index 4e3422a1c7bb..75940ac406b9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -927,22 +927,3 @@ iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, 
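/*
 * The room check in iwl_fw_dbg_copy_tlv() keeps the invariant that
 * offset never passes size within a struct iwl_apply_point_data.
 * Expressed as a hypothetical predicate:
 */
static bool iwl_apply_point_has_room(const struct iwl_apply_point_data *data,
				     int copy_size)
{
	return data->offset + copy_size <= data->size;
}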
return NULL; } IWL_EXPORT_SYMBOL(iwl_parse_eeprom_data); - -/* helper functions */ -int iwl_nvm_check_version(struct iwl_nvm_data *data, - struct iwl_trans *trans) -{ - if (data->nvm_version >= trans->cfg->nvm_ver || - data->calib_version >= trans->cfg->nvm_calib_ver) { - IWL_DEBUG_INFO(trans, "device EEPROM VER=0x%x, CALIB=0x%x\n", - data->nvm_version, data->calib_version); - return 0; - } - - IWL_ERR(trans, - "Unsupported (too old) EEPROM VER=0x%x < 0x%x CALIB=0x%x < 0x%x\n", - data->nvm_version, trans->cfg->nvm_ver, - data->calib_version, trans->cfg->nvm_calib_ver); - return -EINVAL; -} -IWL_EXPORT_SYMBOL(iwl_nvm_check_version); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h index d910bda087f7..2375d300a7cd 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.h @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -117,9 +119,6 @@ struct iwl_nvm_data * iwl_parse_eeprom_data(struct device *dev, const struct iwl_cfg *cfg, const u8 *eeprom, size_t eeprom_size); -int iwl_nvm_check_version(struct iwl_nvm_data *data, - struct iwl_trans *trans); - int iwl_init_sband_channels(struct iwl_nvm_data *data, struct ieee80211_supported_band *sband, int n_channels, enum nl80211_band band); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index 6fc8dac4aab7..73b1c46f1158 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -122,6 +122,7 @@ enum iwl_uapsd_disable { * @fw_monitor: allow to use firmware monitor * @disable_11ac: disable VHT capabilities, default = false. * @remove_when_gone: remove an inaccessible device from the PCIe bus. 
+ * @enable_ini: enable new FW debug infrastructure (INI TLVs) */ struct iwl_mod_params { int swcrypto; @@ -148,6 +149,7 @@ struct iwl_mod_params { */ bool disable_11ax; bool remove_when_gone; + bool enable_ini; }; #endif /* #__iwl_modparams_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index 96e101d79662..d9afedc3d1d9 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -465,101 +465,185 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } -static struct ieee80211_sband_iftype_data iwl_he_capa = { - .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), - .he_cap = { - .has_he = true, - .he_cap_elem = { - .mac_cap_info[0] = - IEEE80211_HE_MAC_CAP0_HTC_HE | - IEEE80211_HE_MAC_CAP0_TWT_REQ, - .mac_cap_info[1] = - IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | - IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, - .mac_cap_info[2] = - IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | - IEEE80211_HE_MAC_CAP2_MU_CASCADING | - IEEE80211_HE_MAC_CAP2_ACK_EN, - .mac_cap_info[3] = - IEEE80211_HE_MAC_CAP3_OMI_CONTROL | - IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, - .mac_cap_info[4] = - IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU | - IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, - .mac_cap_info[5] = - IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | - IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | - IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, - .phy_cap_info[0] = - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | - IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, - .phy_cap_info[1] = - IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | - IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | - IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | - IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, - .phy_cap_info[2] = - IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | - IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | - IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | - IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, - .phy_cap_info[3] = - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | - IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | - IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | - IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, - .phy_cap_info[4] = - IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | - IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | - IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, - .phy_cap_info[5] = - IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | - IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | - IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, - .phy_cap_info[6] = - IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | - IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | - IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | - IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | - IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO | - IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, - .phy_cap_info[7] = - IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | - IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | - IEEE80211_HE_PHY_CAP7_MAX_NC_1, - .phy_cap_info[8] = - IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | - IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | - IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | -
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | - IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, - .phy_cap_info[9] = - IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | - IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | - IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, +static struct ieee80211_sband_iftype_data iwl_he_capa[] = { + { + .types_mask = BIT(NL80211_IFTYPE_STATION), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE | + IEEE80211_HE_MAC_CAP0_TWT_REQ, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = + IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU | + IEEE80211_HE_MAC_CAP4_MULTI_TID_AGG_TX_QOS_B39, + .mac_cap_info[5] = + IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B40 | + IEEE80211_HE_MAC_CAP5_MULTI_TID_AGG_TX_QOS_B41 | + IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | + IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, + .phy_cap_info[3] = + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, + .phy_cap_info[4] = + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, + .phy_cap_info[5] = + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, + .phy_cap_info[6] = + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB | + IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB | + IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, + .phy_cap_info[7] = + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP7_MAX_NC_1, + .phy_cap_info[8] = + IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, + .phy_cap_info[9] = + IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, + }, + /* + * Set default Tx/Rx HE MCS NSS Support field. 
+ * Indicate support for up to 2 spatial streams and all + * MCS, without any special cases + */ + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + /* + * Set default PPE thresholds, with PPET16 set to 0, + * PPET8 set to 7 + */ + .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, - /* - * Set default Tx/Rx HE MCS NSS Support field. Indicate support - * for up to 2 spatial streams and all MCS, without any special - * cases - */ - .he_mcs_nss_supp = { - .rx_mcs_80 = cpu_to_le16(0xfffa), - .tx_mcs_80 = cpu_to_le16(0xfffa), - .rx_mcs_160 = cpu_to_le16(0xfffa), - .tx_mcs_160 = cpu_to_le16(0xfffa), - .rx_mcs_80p80 = cpu_to_le16(0xffff), - .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + { + .types_mask = BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE | + IEEE80211_HE_MAC_CAP0_TWT_RES, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_BSR | + IEEE80211_HE_MAC_CAP2_MU_CASCADING | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_OMI_CONTROL | + IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = + IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, + .phy_cap_info[3] = + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, + .phy_cap_info[4] = + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, + .phy_cap_info[5] = + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK | + IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK, + .phy_cap_info[6] = + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU | + IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU | + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, + .phy_cap_info[7] = + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP7_MAX_NC_1, + .phy_cap_info[8] = + IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ, + .phy_cap_info[9] = + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB | + IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB, + }, + /* + * Set default Tx/Rx HE MCS NSS Support field. 
+ * Indicate support for up to 2 spatial streams and all + * MCS, without any special cases + */ + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + /* + * Set default PPE thresholds, with PPET16 set to 0, + * PPET8 set to 7 + */ + .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, - /* - * Set default PPE thresholds, with PPET16 set to 0, PPET8 set - * to 7 - */ - .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, }, }; @@ -568,20 +652,24 @@ static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, { if (sband->band == NL80211_BAND_2GHZ || sband->band == NL80211_BAND_5GHZ) - sband->iftype_data = &iwl_he_capa; + sband->iftype_data = iwl_he_capa; else return; - sband->n_iftype_data = 1; + sband->n_iftype_data = ARRAY_SIZE(iwl_he_capa); /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ if ((tx_chains & rx_chains) != ANT_AB) { - iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &= - ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; - iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &= - ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; - iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[7] &= - ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; + int i; + + for (i = 0; i < sband->n_iftype_data; i++) { + iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[1] &= + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS; + iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_TX_MAX_NSTS; + iwl_he_capa[i].he_cap.he_cap_elem.phy_cap_info[7] &= + ~IEEE80211_HE_PHY_CAP7_MAX_NC_MASK; + } } } diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h index 0f51c7bea8d0..9d89b7d7f9fa 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-prph.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-prph.h @@ -8,6 +8,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH * Copyright(c) 2016 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. 
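Turning iwl_he_capa into an array is why iwl_init_he_hw_capab() above now loops: chain-dependent bits must be cleared in every iftype entry, not just one. A compilable sketch of that masking step; the bit values are placeholders, not the real IEEE80211_HE_PHY_CAP* constants.

#include <stddef.h>
#include <stdint.h>

#define MIDAMBLE_RX_TX_MAX_NSTS 0x80    /* placeholder bit values */
#define MAX_NC_MASK             0xe0

struct he_caps {
        uint8_t phy_cap_info[11];
};

/* Clear the 2x2-only capability bits in every per-iftype entry. */
static void mask_if_not_2x2(struct he_caps *caps, size_t n, int is_2x2)
{
        size_t i;

        if (is_2x2)
                return;
        for (i = 0; i < n; i++) {
                caps[i].phy_cap_info[1] &= ~MIDAMBLE_RX_TX_MAX_NSTS;
                caps[i].phy_cap_info[7] &= ~MAX_NC_MASK;
        }
}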
* * Redistribution and use in source and binary forms, with or without @@ -360,6 +362,12 @@ #define MON_BUFF_END_ADDR (0xa03c40) #define MON_BUFF_WRPTR (0xa03c44) #define MON_BUFF_CYCLE_CNT (0xa03c48) +/* FW monitor family 8000 and on */ +#define MON_BUFF_BASE_ADDR_VER2 (0xa03c3c) +#define MON_BUFF_END_ADDR_VER2 (0xa03c20) +#define MON_BUFF_WRPTR_VER2 (0xa03c24) +#define MON_BUFF_CYCLE_CNT_VER2 (0xa03c28) +#define MON_BUFF_SHIFT_VER2 (0x8) #define MON_DMARB_RD_CTL_ADDR (0xa03c60) #define MON_DMARB_RD_DATA_ADDR (0xa03c5c) @@ -394,6 +402,7 @@ enum aux_misc_master1_en { #define AUX_MISC_MASTER1_SMPHR_STATUS 0xA20800 #define RSA_ENABLE 0xA24B08 #define PREG_AUX_BUS_WPROT_0 0xA04CC0 +#define PREG_PRPH_WPROT_0 0xA04CE0 #define SB_CPU_1_STATUS 0xA01E30 #define SB_CPU_2_STATUS 0xA01E34 #define UMAG_SB_CPU_1_STATUS 0xA038C0 @@ -420,4 +429,8 @@ enum { #define UREG_CHICK (0xA05C00) #define UREG_CHICK_MSI_ENABLE BIT(24) #define UREG_CHICK_MSIX_ENABLE BIT(25) + +#define HPM_DEBUG 0xA03440 +#define PERSISTENCE_BIT BIT(12) +#define PREG_WFPM_ACCESS BIT(12) #endif /* __iwl_prph_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 26b3c73051ca..a7009cd4232d 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -73,6 +73,8 @@ #include "iwl-op-mode.h" #include "fw/api/cmdhdr.h" #include "fw/api/txq.h" +#include "fw/api/dbg-tlv.h" +#include "iwl-dbg-tlv.h" /** * DOC: Transport layer - what is it ? @@ -534,6 +536,8 @@ struct iwl_trans_rxq_dma_data { * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last * TX'ed commands and similar. The buffer will be vfree'd by the caller. * Note that the transport must fill in the proper file headers. + * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup + * of the trans debugfs */ struct iwl_trans_ops { @@ -602,8 +606,8 @@ struct iwl_trans_ops { void (*resume)(struct iwl_trans *trans); struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans, - const struct iwl_fw_dbg_trigger_tlv - *trigger); + u32 dump_mask); + void (*debugfs_cleanup)(struct iwl_trans *trans); }; /** @@ -679,7 +683,6 @@ enum iwl_plat_pm_mode { * enter/exit (in msecs). */ #define IWL_TRANS_IDLE_TIMEOUT 2000 -#define IWL_MAX_DEBUG_ALLOCATIONS 1 /** * struct iwl_dram_data @@ -734,6 +737,7 @@ struct iwl_dram_data { * @runtime_pm_mode: the runtime power management mode in use. This * mode is set during the initialization phase and is not * supposed to change during runtime. 
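One plausible reading of MON_BUFF_SHIFT_VER2 (0x8) next to the new VER2 monitor registers is that these registers hold 256-byte-granular values, so a DMA address would be shifted right when programmed and left when read back. That is an inference from the constant alone, not something this hunk states; a sketch under that assumption:

#include <stdint.h>

#define MON_BUFF_SHIFT_VER2 0x8

/* Program a monitor register from a byte address (assumed granularity). */
static uint32_t mon_reg_from_addr(uint64_t dma_addr)
{
        return (uint32_t)(dma_addr >> MON_BUFF_SHIFT_VER2);
}

/* Recover a byte address from a monitor register value. */
static uint64_t mon_addr_from_reg(uint32_t reg)
{
        return (uint64_t)reg << MON_BUFF_SHIFT_VER2;
}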
+ * @dbg_rec_on: true iff there is a fw debug recording currently active */ struct iwl_trans { const struct iwl_trans_ops *ops; @@ -774,17 +778,23 @@ struct iwl_trans { struct lockdep_map sync_cmd_lockdep_map; #endif + struct iwl_apply_point_data apply_points[IWL_FW_INI_APPLY_NUM]; + struct iwl_apply_point_data apply_points_ext[IWL_FW_INI_APPLY_NUM]; + + bool external_ini_loaded; + bool ini_valid; + const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; - u32 dbg_dump_mask; u8 dbg_n_dest_reg; int num_blocks; - struct iwl_dram_data fw_mon[IWL_MAX_DEBUG_ALLOCATIONS]; + struct iwl_dram_data fw_mon[IWL_FW_INI_APPLY_NUM]; enum iwl_plat_pm_mode system_pm_mode; enum iwl_plat_pm_mode runtime_pm_mode; bool suspending; + bool dbg_rec_on; /* pointer to trans specific struct */ /*Ensure that this pointer will always be aligned to sizeof pointer */ @@ -897,12 +907,11 @@ static inline void iwl_trans_resume(struct iwl_trans *trans) } static inline struct iwl_trans_dump_data * -iwl_trans_dump_data(struct iwl_trans *trans, - const struct iwl_fw_dbg_trigger_tlv *trigger) +iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask) { if (!trans->ops->dump_data) return NULL; - return trans->ops->dump_data(trans, trigger); + return trans->ops->dump_data(trans, dump_mask); } static inline struct iwl_device_cmd * diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 843f3b41b72e..01b5338201d6 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1811,8 +1811,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, n_matches = 0; } - net_detect = kzalloc(sizeof(*net_detect) + - (n_matches * sizeof(net_detect->matches[0])), + net_detect = kzalloc(struct_size(net_detect, matches, n_matches), GFP_KERNEL); if (!net_detect || !n_matches) goto out_report_nd; @@ -1827,8 +1826,7 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm, for (j = 0; j < SCAN_OFFLOAD_MATCHING_CHANNELS_LEN; j++) n_channels += hweight8(fw_match->matching_channels[j]); - match = kzalloc(sizeof(*match) + - (n_channels * sizeof(*match->channels)), + match = kzalloc(struct_size(match, channels, n_channels), GFP_KERNEL); if (!match) goto out_report_nd; @@ -1956,7 +1954,7 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test) set_bit(STATUS_FW_ERROR, &mvm->trans->status); iwl_mvm_dump_nic_error_log(mvm); iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - NULL, 0); + false, 0); ret = 1; goto err; } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c index 1aa6c7e93088..33b0af24a537 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs-vif.c @@ -1299,10 +1299,11 @@ static ssize_t iwl_dbgfs_low_latency_read(struct file *file, int len; len = scnprintf(buf, sizeof(buf) - 1, - "traffic=%d\ndbgfs=%d\nvcmd=%d\n", + "traffic=%d\ndbgfs=%d\nvcmd=%d\nvif_type=%d\n", !!(mvmvif->low_latency & LOW_LATENCY_TRAFFIC), !!(mvmvif->low_latency & LOW_LATENCY_DEBUGFS), - !!(mvmvif->low_latency & LOW_LATENCY_VCMD)); + !!(mvmvif->low_latency & LOW_LATENCY_VCMD), + !!(mvmvif->low_latency & LOW_LATENCY_VIF_TYPE)); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } @@ -1440,15 +1441,6 @@ static ssize_t iwl_dbgfs_quota_min_read(struct file *file, return 
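With the dump_data signature change, callers now pick dump regions with a plain bitmask instead of handing the transport a trigger TLV, and the optional-op check stays in the inline wrapper. A self-contained sketch of the new calling convention, with invented region bit names:

#include <stddef.h>
#include <stdint.h>

#define DUMP_CSR   (1u << 0)    /* placeholder region bits */
#define DUMP_PRPH  (1u << 1)
#define DUMP_FWMON (1u << 2)

struct dump_data;               /* opaque, like the real dump result */

struct trans_ops {
        struct dump_data *(*dump_data)(void *trans, uint32_t dump_mask);
};

static struct dump_data *trans_dump_data(const struct trans_ops *ops,
                                         void *trans, uint32_t dump_mask)
{
        if (!ops->dump_data)
                return NULL;    /* transport may not support dumps */
        return ops->dump_data(trans, dump_mask);
}

/* e.g. trans_dump_data(ops, trans, DUMP_CSR | DUMP_FWMON); */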
simple_read_from_buffer(user_buf, count, ppos, buf, len); } -static const char * const chanwidths[] = { - [NL80211_CHAN_WIDTH_20_NOHT] = "noht", - [NL80211_CHAN_WIDTH_20] = "ht20", - [NL80211_CHAN_WIDTH_40] = "ht40", - [NL80211_CHAN_WIDTH_80] = "vht80", - [NL80211_CHAN_WIDTH_80P80] = "vht80p80", - [NL80211_CHAN_WIDTH_160] = "vht160", -}; - #define MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz) \ _MVM_DEBUGFS_WRITE_FILE_OPS(name, bufsz, struct ieee80211_vif) #define MVM_DEBUGFS_READ_WRITE_FILE_OPS(name, bufsz) \ diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 3b6b3d8fb961..52c361a6124c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1284,7 +1284,7 @@ static ssize_t iwl_dbgfs_fw_dbg_collect_write(struct iwl_mvm *mvm, return 0; iwl_fw_dbg_collect(&mvm->fwrt, FW_DBG_TRIGGER_USER, buf, - (count - 1), NULL); + (count - 1)); iwl_mvm_unref(mvm, IWL_MVM_REF_PRPH_WRITE); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 1689bead1b4f..0d6c313b6669 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -377,6 +377,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, atomic_set(&mvm->mac80211_queue_stop_count[i], 0); set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); +#ifdef CONFIG_IWLWIFI_DEBUGFS + iwl_fw_set_dbg_rec_on(&mvm->fwrt); +#endif clear_bit(IWL_FWRT_STATUS_WAIT_ALIVE, &mvm->fwrt.status); return 0; @@ -407,6 +410,7 @@ static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + iwl_fw_assert_error_dump(&mvm->fwrt); goto error; } @@ -543,7 +547,9 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm) if (mvm->nvm_file_name) iwl_mvm_load_nvm_to_nic(mvm); - WARN_ON(iwl_nvm_check_version(mvm->nvm_data, mvm->trans)); + WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver, + "Too old NVM version (0x%0x, required = 0x%0x)", + mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver); /* * abort after reading the nvm in case RF Kill is on, we will complete @@ -1023,10 +1029,14 @@ static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm) if (ret) return ret; + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_EARLY); + ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR); if (ret) return ret; + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_AFTER_ALIVE); + return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img); } @@ -1045,6 +1055,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm) ret = iwl_mvm_load_rt_fw(mvm); if (ret) { IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret); + iwl_fw_assert_error_dump(&mvm->fwrt); goto error; } @@ -1054,11 +1065,13 @@ int iwl_mvm_up(struct iwl_mvm *mvm) if (ret) IWL_ERR(mvm, "Failed to initialize Smart Fifo\n"); - mvm->fwrt.dump.conf = FW_DBG_INVALID; - /* if we have a destination, assume EARLY START */ - if (mvm->fw->dbg.dest_tlv) - mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; - iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); + if (!mvm->trans->ini_valid) { + mvm->fwrt.dump.conf = FW_DBG_INVALID; + /* if we have a destination, assume EARLY START */ + if (mvm->fw->dbg.dest_tlv) + mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE; + iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE); + } ret = iwl_send_tx_ant_cfg(mvm, 
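iwl_mvm_load_rt_fw() now brackets the alive handshake with two apply points, so staged INI debug TLVs are pushed to the firmware at well-defined lifecycle stages. A sketch of that sequencing under invented names mirroring IWL_FW_INI_APPLY_EARLY and IWL_FW_INI_APPLY_AFTER_ALIVE:

enum apply_point {
        APPLY_EARLY,            /* before the RT image reports alive */
        APPLY_AFTER_ALIVE,      /* right after the alive notification */
        APPLY_POST_INIT,
        APPLY_NUM,
};

static void do_apply_point(enum apply_point p)
{
        (void)p;                /* would push the TLVs staged for stage p */
}

static int load_rt_fw(int (*run_init)(void), int (*wait_alive)(void))
{
        int ret = run_init();

        if (ret)
                return ret;
        do_apply_point(APPLY_EARLY);
        ret = wait_alive();
        if (ret)
                return ret;
        do_apply_point(APPLY_AFTER_ALIVE);
        return 0;
}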
iwl_mvm_get_valid_tx_ant(mvm)); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 6486cfb33f40..7779951a9533 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -767,13 +767,8 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, } ctxt_sta->bi = cpu_to_le32(vif->bss_conf.beacon_int); - ctxt_sta->bi_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); ctxt_sta->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period); - ctxt_sta->dtim_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period)); ctxt_sta->listen_interval = cpu_to_le32(mvm->hw->conf.listen_interval); ctxt_sta->assoc_id = cpu_to_le32(vif->bss_conf.aid); @@ -782,8 +777,30 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); if (vif->bss_conf.assoc && vif->bss_conf.he_support && - !iwlwifi_mod_params.disable_11ax) + !iwlwifi_mod_params.disable_11ax) { + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u8 sta_id = mvmvif->ap_sta_id; + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); + if (sta_id != IWL_MVM_INVALID_STA) { + struct ieee80211_sta *sta; + + sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], + lockdep_is_held(&mvm->mutex)); + + /* + * TODO: we should check the ext cap IE but it is + * unclear why the spec requires two bits (one in HE + * cap IE, and one in the ext cap IE). In the meantime + * rely on the HE cap IE only. + */ + if (sta && (sta->he_cap.he_cap_elem.mac_cap_info[0] & + IEEE80211_HE_MAC_CAP0_TWT_RES)) + ctxt_sta->data_policy |= + cpu_to_le32(TWT_SUPPORTED); + } + } + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } @@ -832,8 +849,6 @@ static int iwl_mvm_mac_ctxt_cmd_ibss(struct iwl_mvm *mvm, /* cmd.ibss.beacon_time/cmd.ibss.beacon_tsf are curently ignored */ cmd.ibss.bi = cpu_to_le32(vif->bss_conf.beacon_int); - cmd.ibss.bi_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); /* TODO: Assumes that the beacon id == mac context id */ cmd.ibss.beacon_template = cpu_to_le32(mvmvif->id); @@ -965,11 +980,8 @@ static void iwl_mvm_mac_ctxt_set_tx(struct iwl_mvm *mvm, tx->tx_flags = cpu_to_le32(tx_flags); if (!fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) { - mvm->mgmt_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->mgmt_last_antenna_idx); - } + IWL_UCODE_TLV_CAPA_BEACON_ANT_SELECTION)) + iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); tx->rate_n_flags = cpu_to_le32(BIT(mvm->mgmt_last_antenna_idx) << @@ -1182,14 +1194,12 @@ static void iwl_mvm_mac_ctxt_cmd_fill_ap(struct iwl_mvm *mvm, IWL_DEBUG_HC(mvm, "No need to receive beacons\n"); } + if (vif->bss_conf.he_support && !iwlwifi_mod_params.disable_11ax) + cmd->filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); + ctxt_ap->bi = cpu_to_le32(vif->bss_conf.beacon_int); - ctxt_ap->bi_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int)); ctxt_ap->dtim_interval = cpu_to_le32(vif->bss_conf.beacon_int * vif->bss_conf.dtim_period); - ctxt_ap->dtim_reciprocal = - cpu_to_le32(iwl_mvm_reciprocal(vif->bss_conf.beacon_int * - vif->bss_conf.dtim_period)); if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) @@ -1522,6 +1532,8 @@ void iwl_mvm_rx_missed_beacons_notif(struct iwl_mvm *mvm, IEEE80211_IFACE_ITER_NORMAL, 
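The TWT hunk above only advertises TWT to the firmware when the peer AP sets the responder bit in its HE MAC capabilities; per the in-code TODO, the extended-capabilities bit is deliberately ignored for now. A sketch of that check, with a placeholder bit standing in for IEEE80211_HE_MAC_CAP0_TWT_RES:

#include <stdint.h>

#define HE_MAC_CAP0_TWT_RES 0x08        /* placeholder, not the spec value */
#define TWT_SUPPORTED       0x1

/* Return the data_policy bits to OR in for this peer, if any. */
static uint32_t twt_data_policy(const uint8_t *peer_mac_cap_info)
{
        if (peer_mac_cap_info[0] & HE_MAC_CAP0_TWT_RES)
                return TWT_SUPPORTED;
        return 0;
}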
iwl_mvm_beacon_loss_iterator, mb); + + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_MISSED_BEACONS); } void iwl_mvm_rx_stored_beacon_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index 00f831d88366..97dc464379d2 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -423,6 +423,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); + ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); if (iwl_mvm_has_tlc_offload(mvm)) { ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); @@ -813,6 +814,21 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, !ieee80211_is_bufferable_mmpdu(hdr->frame_control)) sta = NULL; + /* If there is no sta, and it's not offchannel - send through AP */ + if (info->control.vif->type == NL80211_IFTYPE_STATION && + info->hw_queue != IWL_MVM_OFFCHANNEL_QUEUE && !sta) { + struct iwl_mvm_vif *mvmvif = + iwl_mvm_vif_from_mac80211(info->control.vif); + u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); + + if (ap_sta_id < IWL_MVM_STATION_COUNT) { + /* mac80211 holds rcu read lock */ + sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); + if (IS_ERR_OR_NULL(sta)) + goto drop; + } + } + if (sta) { if (iwl_mvm_defer_tx(mvm, sta, skb)) return; @@ -1113,6 +1129,8 @@ int __iwl_mvm_mac_start(struct iwl_mvm *mvm) } ret = iwl_mvm_up(mvm); + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT); + if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { /* Something went wrong - we need to finish some cleanup * that normally iwl_mvm_mac_restart_complete() below @@ -2005,7 +2023,13 @@ static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); - /* If PPE Thresholds exist, parse them into a FW-familiar format */ + /* + * Initialize the PPE thresholds to "None" (7), as described in Table + * 9-262ac of 80211.ax/D3.0. + */ + memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); + + /* If PPE Thresholds exist, parse them into a FW-familiar format. 
*/ if (sta->he_cap.he_cap_elem.phy_cap_info[6] & IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { u8 nss = (sta->he_cap.ppe_thres[0] & @@ -2383,6 +2407,12 @@ static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, /* must be set before quota calculations */ mvmvif->ap_ibss_active = true; + if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { + iwl_mvm_vif_set_low_latency(mvmvif, true, + LOW_LATENCY_VIF_TYPE); + iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); + } + /* power updated needs to be done before quotas */ iwl_mvm_power_update_mac(mvm); @@ -2445,6 +2475,12 @@ static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, mvmvif->ap_ibss_active = false; mvm->ap_last_beacon_gp2 = 0; + if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { + iwl_mvm_vif_set_low_latency(mvmvif, false, + LOW_LATENCY_VIF_TYPE); + iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); + } + iwl_mvm_bt_coex_vif_change(mvm); iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); @@ -2945,6 +2981,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, if (vif->type == NL80211_IFTYPE_AP) { mvmvif->ap_assoc_sta_count++; iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); + if (vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); } iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, @@ -3355,7 +3394,7 @@ static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, resp = (void *)pkt->data; IWL_DEBUG_TE(mvm, - "Aux ROC: Recieved response from ucode: status=%d uid=%d\n", + "Aux ROC: Received response from ucode: status=%d uid=%d\n", resp->status, resp->event_unique_id); te_data->uid = le32_to_cpu(resp->event_unique_id); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 7ba5bc2ed1c4..1aa690e081ff 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -303,11 +303,13 @@ enum iwl_bt_force_ant_mode { * @LOW_LATENCY_TRAFFIC: indicates low latency traffic was detected * @LOW_LATENCY_DEBUGFS: low latency mode set from debugfs * @LOW_LATENCY_VCMD: low latency mode set from vendor command +* @LOW_LATENCY_VIF_TYPE: low latency mode set because of vif type (ap) */ enum iwl_mvm_low_latency_cause { LOW_LATENCY_TRAFFIC = BIT(0), LOW_LATENCY_DEBUGFS = BIT(1), LOW_LATENCY_VCMD = BIT(2), + LOW_LATENCY_VIF_TYPE = BIT(3), }; /** @@ -844,7 +846,6 @@ struct iwl_mvm { u16 hw_queue_to_mac80211[IWL_MAX_TVQM_QUEUES]; struct iwl_mvm_dqa_txq_info queue_info[IWL_MAX_HW_QUEUES]; - spinlock_t queue_info_lock; /* For syncing queue mgmt operations */ struct work_struct add_stream_wk; /* To add streams to queues */ atomic_t mac80211_queue_stop_count[IEEE80211_MAX_QUEUES]; @@ -1521,6 +1522,11 @@ static inline u8 iwl_mvm_get_valid_rx_ant(struct iwl_mvm *mvm) mvm->fw->valid_rx_ant; } +static inline void iwl_mvm_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant) +{ + *ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant); +} + static inline u32 iwl_mvm_get_phy_config(struct iwl_mvm *mvm) { u32 phy_config = ~(FW_PHY_CFG_TX_CHAIN | @@ -1550,6 +1556,8 @@ void iwl_mvm_rx_rx_mpdu(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb); void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue); +void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue); void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, 
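LOW_LATENCY_VIF_TYPE joins the other causes as one bit in a small mask, which is what lets the AP start/stop hunks set and clear their own reason without clobbering traffic-, debugfs-, or vendor-command-driven low latency. A sketch of that bookkeeping:

#include <stdbool.h>

enum ll_cause {
        LL_TRAFFIC  = 1 << 0,
        LL_DEBUGFS  = 1 << 1,
        LL_VCMD     = 1 << 2,
        LL_VIF_TYPE = 1 << 3,   /* e.g. the vif is a non-P2P AP */
};

/* Set or clear one cause; other causes are untouched. */
static unsigned int ll_update(unsigned int causes, enum ll_cause c, bool set)
{
        return set ? (causes | c) : (causes & ~c);
}

/* The interface is in low-latency mode while any cause remains set. */
static bool ll_active(unsigned int causes)
{
        return causes != 0;
}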
struct iwl_rx_cmd_buffer *rxb, int queue); int iwl_mvm_notify_rx_queue(struct iwl_mvm *mvm, u32 rxq_mask, @@ -1846,6 +1854,8 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, /* get SystemLowLatencyMode - only needed for beacon threshold? */ bool iwl_mvm_low_latency(struct iwl_mvm *mvm); bool iwl_mvm_low_latency_band(struct iwl_mvm *mvm, enum nl80211_band band); +void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, bool low_latency, + u16 mac_id); /* get VMACLowLatencyMode */ static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index af3fba10abc1..30c5127034a0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -676,7 +676,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, INIT_LIST_HEAD(&mvm->aux_roc_te_list); INIT_LIST_HEAD(&mvm->async_handlers_list); spin_lock_init(&mvm->time_event_lock); - spin_lock_init(&mvm->queue_info_lock); INIT_WORK(&mvm->async_handlers_wk, iwl_mvm_async_handlers_wk); INIT_WORK(&mvm->roc_done_wk, iwl_mvm_roc_done_wk); @@ -770,7 +769,6 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, memcpy(trans->dbg_conf_tlv, mvm->fw->dbg.conf_tlv, sizeof(trans->dbg_conf_tlv)); trans->dbg_trigger_tlv = mvm->fw->dbg.trigger_tlv; - trans->dbg_dump_mask = mvm->fw->dbg.dump_mask; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; @@ -846,6 +844,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, iwl_mvm_tof_init(mvm); + iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); + return op_mode; out_unregister: @@ -1073,6 +1073,8 @@ static void iwl_mvm_rx_mq(struct iwl_op_mode *op_mode, iwl_mvm_rx_queue_notif(mvm, rxb, 0); else if (cmd == WIDE_ID(LEGACY_GROUP, FRAME_RELEASE)) iwl_mvm_rx_frame_release(mvm, napi, rxb, 0); + else if (cmd == WIDE_ID(DATA_PATH_GROUP, RX_NO_DATA_NOTIF)) + iwl_mvm_rx_monitor_ndp(mvm, napi, rxb, 0); else iwl_mvm_rx_common(mvm, rxb, pkt); } @@ -1110,11 +1112,7 @@ static void iwl_mvm_async_cb(struct iwl_op_mode *op_mode, static void iwl_mvm_stop_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq; - - spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->hw_queue_to_mac80211[hw_queue]; - spin_unlock_bh(&mvm->queue_info_lock); + unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue]; iwl_mvm_stop_mac_queues(mvm, mq); } @@ -1140,11 +1138,7 @@ void iwl_mvm_start_mac_queues(struct iwl_mvm *mvm, unsigned long mq) static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue) { struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); - unsigned long mq; - - spin_lock_bh(&mvm->queue_info_lock); - mq = mvm->hw_queue_to_mac80211[hw_queue]; - spin_unlock_bh(&mvm->queue_info_lock); + unsigned long mq = mvm->hw_queue_to_mac80211[hw_queue]; iwl_mvm_start_mac_queues(mvm, mq); } @@ -1242,7 +1236,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error) */ if (!mvm->fw_restart && fw_error) { iwl_fw_dbg_collect_desc(&mvm->fwrt, &iwl_dump_desc_assert, - NULL, 0); + false, 0); } else if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { struct iwl_mvm_reprobe *reprobe; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index 7a98e1a1dc40..dabbc04853ac 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ 
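iwl_mvm_toggle_tx_ant() wraps the round-robin step that mac-ctxt.c used to open-code, and ops.c now also calls it once at startup so the first management frame does not always leave on the same antenna. A userspace sketch of the likely rotation (the real helper defers to iwl_mvm_next_antenna(), whose exact iteration bounds differ):

#include <stdint.h>

/* Advance to the next antenna whose bit is set in the valid mask. */
static uint8_t next_antenna(uint8_t valid_mask, uint8_t last_idx)
{
        uint8_t i, idx = last_idx;

        for (i = 0; i < 8; i++) {
                idx = (uint8_t)((idx + 1) % 8);
                if (valid_mask & (1 << idx))
                        return idx;
        }
        return last_idx;        /* no valid antenna; keep the old index */
}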
b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -98,8 +98,12 @@ static u8 rs_fw_sgi_cw_support(struct ieee80211_sta *sta) { struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; u8 supp = 0; + if (he_cap && he_cap->has_he) + return 0; + if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) supp |= BIT(IWL_TLC_MNG_CH_WIDTH_20MHZ); if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c index ef624833cf1b..6653a238f32e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rx.c @@ -593,31 +593,28 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac, int hyst = vif->bss_conf.cqm_rssi_hyst; u16 id = le32_to_cpu(data->mac_id); struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + u16 vif_id = mvmvif->id; /* This doesn't need the MAC ID check since it's not taking the * data copied into the "data" struct, but rather the data from * the notification directly. */ - if (data->general) { - u16 vif_id = mvmvif->id; - - if (iwl_mvm_is_cdb_supported(mvm)) { - struct mvm_statistics_general_cdb *general = - data->general; - - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(general->beacon_counter[vif_id]); - mvmvif->beacon_stats.avg_signal = - -general->beacon_average_energy[vif_id]; - } else { - struct mvm_statistics_general_v8 *general = - data->general; - - mvmvif->beacon_stats.num_beacons = - le32_to_cpu(general->beacon_counter[vif_id]); - mvmvif->beacon_stats.avg_signal = - -general->beacon_average_energy[vif_id]; - } + if (iwl_mvm_is_cdb_supported(mvm)) { + struct mvm_statistics_general_cdb *general = + data->general; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(general->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -general->beacon_average_energy[vif_id]; + } else { + struct mvm_statistics_general_v8 *general = + data->general; + + mvmvif->beacon_stats.num_beacons = + le32_to_cpu(general->beacon_counter[vif_id]); + mvmvif->beacon_stats.avg_signal = + -general->beacon_average_energy[vif_id]; } if (mvmvif->id != id) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 26ac9402568d..7bd8676508f5 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -200,7 +200,8 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - if (iwl_mvm_check_pn(mvm, skb, queue, sta)) { + if (!(rx_status->flag & RX_FLAG_NO_PSDU) && + iwl_mvm_check_pn(mvm, skb, queue, sta)) { kfree_skb(skb); } else { unsigned int radiotap_len = 0; @@ -863,68 +864,66 @@ static void iwl_mvm_flip_address(u8 *addr) ether_addr_copy(addr, mac_addr); } -static void iwl_mvm_decode_he_sigb(struct iwl_mvm *mvm, - struct iwl_rx_mpdu_desc *desc, - u32 rate_n_flags, - struct ieee80211_radiotap_he_mu *he_mu) -{ - u32 sigb0, sigb1; - u16 sigb2; - - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { - sigb0 = le32_to_cpu(desc->v3.sigb_common0); - sigb1 = le32_to_cpu(desc->v3.sigb_common1); - } else { - sigb0 = le32_to_cpu(desc->v1.sigb_common0); - sigb1 = le32_to_cpu(desc->v1.sigb_common1); - } +struct iwl_mvm_rx_phy_data { + enum iwl_rx_phy_info_type info_type; + __le32 d0, d1, d2, d3; + __le16 d4; +}; - sigb2 = le16_to_cpu(desc->sigb_common2); +static void 
iwl_mvm_decode_he_mu_ext(struct iwl_mvm *mvm, + struct iwl_mvm_rx_phy_data *phy_data, + u32 rate_n_flags, + struct ieee80211_radiotap_he_mu *he_mu) +{ + u32 phy_data2 = le32_to_cpu(phy_data->d2); + u32 phy_data3 = le32_to_cpu(phy_data->d3); + u16 phy_data4 = le16_to_cpu(phy_data->d4); - if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CRC_OK, sigb2)) { + if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CRC_OK, phy_data4)) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN); he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH1_CTR_RU, - sigb2), + le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH1_CTR_RU, + phy_data4), IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU); - he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU0, - sigb0); - he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU1, - sigb1); - he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH1_RU2, - sigb0); - he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH1_RU3, - sigb1); + he_mu->ru_ch1[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU0, + phy_data2); + he_mu->ru_ch1[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU1, + phy_data3); + he_mu->ru_ch1[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH1_RU2, + phy_data2); + he_mu->ru_ch1[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH1_RU3, + phy_data3); } - if (FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CRC_OK, sigb2) && + if (FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CRC_OK, phy_data4) && (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) != RATE_MCS_CHAN_WIDTH_20) { he_mu->flags1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN | IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN); he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_SIGB_COMMON2_CH2_CTR_RU, - sigb2), + le16_encode_bits(FIELD_GET(IWL_RX_PHY_DATA4_HE_MU_EXT_CH2_CTR_RU, + phy_data4), IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU); - he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU0, - sigb0); - he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU1, - sigb1); - he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_HE_SIGB_COMMON0_CH2_RU2, - sigb0); - he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_HE_SIGB_COMMON1_CH2_RU3, - sigb1); + he_mu->ru_ch2[0] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU0, + phy_data2); + he_mu->ru_ch2[1] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU1, + phy_data3); + he_mu->ru_ch2[2] = FIELD_GET(IWL_RX_PHY_DATA2_HE_MU_EXT_CH2_RU2, + phy_data2); + he_mu->ru_ch2[3] = FIELD_GET(IWL_RX_PHY_DATA3_HE_MU_EXT_CH2_RU3, + phy_data3); } } static void -iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags, +iwl_mvm_decode_he_phy_ru_alloc(struct iwl_mvm_rx_phy_data *phy_data, + u32 rate_n_flags, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status) @@ -937,7 +936,7 @@ iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags, * happen though as management frames where we need * the TSF/timers are not be transmitted in HE-MU. 
*/ - u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); + u8 ru = le32_get_bits(phy_data->d1, IWL_RX_PHY_DATA1_HE_RU_ALLOC_MASK); u8 offs = 0; rx_status->bw = RATE_INFO_BW_HE_RU; @@ -976,7 +975,7 @@ iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags, IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN | IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN); - if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) + if (phy_data->d1 & cpu_to_le32(IWL_RX_PHY_DATA1_HE_RU_ALLOC_SEC80)) he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); @@ -996,106 +995,122 @@ iwl_mvm_decode_he_phy_ru_alloc(u64 he_phy_data, u32 rate_n_flags, } static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, - struct iwl_rx_mpdu_desc *desc, + struct iwl_mvm_rx_phy_data *phy_data, struct ieee80211_radiotap_he *he, struct ieee80211_radiotap_he_mu *he_mu, struct ieee80211_rx_status *rx_status, - u64 he_phy_data, u32 rate_n_flags, - int queue) + u32 rate_n_flags, int queue) { - u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; - bool sigb_data; - u16 d1known = IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | - IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN; - u16 d2known = IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | - IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN; - - he->data1 |= cpu_to_le16(d1known); - he->data2 |= cpu_to_le16(d2known); - he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); - he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_UPLINK, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA3_UL_DL); - he->data3 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_LDPC_EXT_SYM, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); - he->data4 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SPATIAL_REUSE_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); - he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PRE_FEC_PAD_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); - he->data5 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PE_DISAMBIG, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); - he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA6_TXOP); - he->data6 |= le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_DOPPLER, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); - - switch (he_type) { - case RATE_MCS_HE_TYPE_MU: + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_NONE: + case IWL_RX_PHY_INFO_TYPE_CCK: + case IWL_RX_PHY_INFO_TYPE_OFDM_LGCY: + case IWL_RX_PHY_INFO_TYPE_HT: + case IWL_RX_PHY_INFO_TYPE_VHT_SU: + case IWL_RX_PHY_INFO_TYPE_VHT_MU: + return; + case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE1), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE2), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + 
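The decode rewrite trades FIELD_GET() on one 64-bit he_phy_data blob for le32_get_bits() on four 32-bit phy_data words. Both helpers derive the shift from the mask's lowest set bit; a sketch of that extraction (the mask is illustrative, and the little-endian conversion that le32_get_bits() also performs is omitted here):

#include <stdint.h>

/* Extract a field whose position is encoded by its mask (mask != 0). */
static uint32_t get_bits(uint32_t word, uint32_t mask)
{
        unsigned int shift = (unsigned int)__builtin_ctz(mask);

        return (word & mask) >> shift;
}

/* e.g. bss_color = get_bits(phy_data_d0, 0x0000003f); */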
IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE3), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA2_HE_TB_EXT_SPTL_REUSE4), + IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4); + /* fall through */ + case IWL_RX_PHY_INFO_TYPE_HE_SU: + case IWL_RX_PHY_INFO_TYPE_HE_MU: + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_TB: + /* HE common */ + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN); + he->data2 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_BSS_COLOR_MASK), + IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR); + if (phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB && + phy_data->info_type != IWL_RX_PHY_INFO_TYPE_HE_TB_EXT) { + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_UPLINK), + IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + } + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_LDPC_EXT_SYM), + IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG); + he->data4 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_SPATIAL_REUSE_MASK), + IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE); + he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_PRE_FEC_PAD_MASK), + IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD); + he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_PE_DISAMBIG), + IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG); + he->data5 |= le16_encode_bits(le32_get_bits(phy_data->d1, + IWL_RX_PHY_DATA1_HE_LTF_NUM_MASK), + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_TXOP_DUR_MASK), + IEEE80211_RADIOTAP_HE_DATA6_TXOP); + he->data6 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_DOPPLER), + IEEE80211_RADIOTAP_HE_DATA6_DOPPLER); + break; + } + + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_DCM, - he_phy_data), + le16_encode_bits(le16_get_bits(phy_data->d4, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_DCM), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); he_mu->flags1 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_MCS_MASK, - he_phy_data), + le16_encode_bits(le16_get_bits(phy_data->d4, + IWL_RX_PHY_DATA4_HE_MU_EXT_SIGB_MCS_MASK), IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIBG_SYM_OR_USER_NUM_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + le16_encode_bits(le16_get_bits(phy_data->d4, + IWL_RX_PHY_DATA4_HE_MU_EXT_PREAMBLE_PUNC_TYPE_MASK), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); + iwl_mvm_decode_he_mu_ext(mvm, phy_data, rate_n_flags, he_mu); + /* fall through */ + case IWL_RX_PHY_INFO_TYPE_HE_MU: he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_SIGB_COMPRESSION, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + le16_encode_bits(le32_get_bits(phy_data->d1, + IWL_RX_PHY_DATA1_HE_MU_SIBG_SYM_OR_USER_NUM_MASK), + 
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); he_mu->flags2 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_MU_PREAMBLE_PUNC_TYPE_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); - - sigb_data = FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, - he_phy_data) == - IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO; - if (sigb_data) - iwl_mvm_decode_he_sigb(mvm, desc, rate_n_flags, he_mu); + le16_encode_bits(le32_get_bits(phy_data->d1, + IWL_RX_PHY_DATA1_HE_MU_SIGB_COMPRESSION), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); /* fall through */ - case RATE_MCS_HE_TYPE_TRIG: - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); - he->data5 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); - break; - case RATE_MCS_HE_TYPE_SU: - case RATE_MCS_HE_TYPE_EXT_SU: - he->data1 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); - he->data3 |= - le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_BEAM_CHNG, - he_phy_data), - IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); - break; - } - - switch (FIELD_GET(IWL_RX_HE_PHY_INFO_TYPE_MASK, he_phy_data)) { - case IWL_RX_HE_PHY_INFO_TYPE_MU: - case IWL_RX_HE_PHY_INFO_TYPE_MU_EXT_INFO: - case IWL_RX_HE_PHY_INFO_TYPE_TB: - iwl_mvm_decode_he_phy_ru_alloc(he_phy_data, rate_n_flags, + case IWL_RX_PHY_INFO_TYPE_HE_TB: + case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: + iwl_mvm_decode_he_phy_ru_alloc(phy_data, rate_n_flags, he, he_mu, rx_status); break; + case IWL_RX_PHY_INFO_TYPE_HE_SU: + he->data1 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN); + he->data3 |= le16_encode_bits(le32_get_bits(phy_data->d0, + IWL_RX_PHY_DATA0_HE_BEAM_CHNG), + IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE); + break; default: /* nothing */ break; @@ -1103,13 +1118,10 @@ static void iwl_mvm_decode_he_phy_data(struct iwl_mvm *mvm, } static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, - struct iwl_rx_mpdu_desc *desc, + struct iwl_mvm_rx_phy_data *phy_data, u32 rate_n_flags, u16 phy_info, int queue) { struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); - /* this is invalid e.g. 
because puncture type doesn't allow 0b11 */ -#define HE_PHY_DATA_INVAL ((u64)-1) - u64 he_phy_data = HE_PHY_DATA_INVAL; struct ieee80211_radiotap_he *he = NULL; struct ieee80211_radiotap_he_mu *he_mu = NULL; u32 he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; @@ -1136,49 +1148,41 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, radiotap_len += sizeof(known); rx_status->flag |= RX_FLAG_RADIOTAP_HE; - if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) - he_phy_data = le64_to_cpu(desc->v3.he_phy_data); - else - he_phy_data = le64_to_cpu(desc->v1.he_phy_data); - - if (he_type == RATE_MCS_HE_TYPE_MU) { - he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); - radiotap_len += sizeof(mu_known); - rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; - } + if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU || + phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_MU_EXT) { + he_mu = skb_put_data(skb, &mu_known, sizeof(mu_known)); + radiotap_len += sizeof(mu_known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; } /* temporarily hide the radiotap data */ __skb_pull(skb, radiotap_len); - if (he_phy_data != HE_PHY_DATA_INVAL && - he_type == RATE_MCS_HE_TYPE_SU) { + if (phy_data->info_type == IWL_RX_PHY_INFO_TYPE_HE_SU) { /* report the AMPDU-EOF bit on single frames */ if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, he_phy_data)) + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } } - if (he_phy_data != HE_PHY_DATA_INVAL) - iwl_mvm_decode_he_phy_data(mvm, desc, he, he_mu, rx_status, - he_phy_data, rate_n_flags, queue); + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + iwl_mvm_decode_he_phy_data(mvm, phy_data, he, he_mu, rx_status, + rate_n_flags, queue); /* update aggregation data for monitor sake on default queue */ - if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + if (!queue && (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) && + (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; /* toggle is switched whenever new aggregation starts */ if (toggle_bit != mvm->ampdu_toggle && - he_phy_data != HE_PHY_DATA_INVAL && (he_type == RATE_MCS_HE_TYPE_MU || he_type == RATE_MCS_HE_TYPE_SU)) { rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; - if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, - he_phy_data)) + if (phy_data->d0 & cpu_to_le32(IWL_RX_PHY_DATA0_HE_DELIM_EOF)) rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; } } @@ -1261,43 +1265,34 @@ static void iwl_mvm_rx_he(struct iwl_mvm *mvm, struct sk_buff *skb, break; } - he->data5 |= le16_encode_bits(ltf, IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); - - if (he_type == RATE_MCS_HE_TYPE_SU || - he_type == RATE_MCS_HE_TYPE_EXT_SU) { - u16 val; - - /* LTF syms correspond to streams */ - he->data2 |= - cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); - switch (rx_status->nss) { - case 1: - val = 0; - break; - case 2: - val = 1; - break; - case 3: - case 4: - val = 2; - break; - case 5: - case 6: - val = 3; - break; - case 7: - case 8: - val = 4; - break; - default: - WARN_ONCE(1, "invalid nss: %d\n", - rx_status->nss); - val = 0; - } + he->data5 |= le16_encode_bits(ltf, + IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE); +} - he->data5 |= - le16_encode_bits(val, - IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); +static void iwl_mvm_decode_lsig(struct sk_buff *skb, + struct iwl_mvm_rx_phy_data 
*phy_data) +{ + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + struct ieee80211_radiotap_lsig *lsig; + + switch (phy_data->info_type) { + case IWL_RX_PHY_INFO_TYPE_HT: + case IWL_RX_PHY_INFO_TYPE_VHT_SU: + case IWL_RX_PHY_INFO_TYPE_VHT_MU: + case IWL_RX_PHY_INFO_TYPE_HE_TB_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_SU: + case IWL_RX_PHY_INFO_TYPE_HE_MU: + case IWL_RX_PHY_INFO_TYPE_HE_MU_EXT: + case IWL_RX_PHY_INFO_TYPE_HE_TB: + lsig = skb_put(skb, sizeof(*lsig)); + lsig->data1 = cpu_to_le16(IEEE80211_RADIOTAP_LSIG_DATA1_LENGTH_KNOWN); + lsig->data2 = le16_encode_bits(le32_get_bits(phy_data->d1, + IWL_RX_PHY_DATA1_LSIG_LEN_MASK), + IEEE80211_RADIOTAP_LSIG_DATA2_LENGTH); + rx_status->flag |= RX_FLAG_RADIOTAP_LSIG; + break; + default: + break; } } @@ -1315,6 +1310,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct sk_buff *skb; u8 crypt_len = 0, channel, energy_a, energy_b; size_t desc_size; + struct iwl_mvm_rx_phy_data phy_data = { + .d4 = desc->phy_data4, + .info_type = IWL_RX_PHY_INFO_TYPE_NONE, + }; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; @@ -1326,6 +1325,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, energy_a = desc->v3.energy_a; energy_b = desc->v3.energy_b; desc_size = sizeof(*desc); + + phy_data.d0 = desc->v3.phy_data0; + phy_data.d1 = desc->v3.phy_data1; + phy_data.d2 = desc->v3.phy_data2; + phy_data.d3 = desc->v3.phy_data3; } else { rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); channel = desc->v1.channel; @@ -1333,8 +1337,18 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, energy_a = desc->v1.energy_a; energy_b = desc->v1.energy_b; desc_size = IWL_RX_DESC_SIZE_V1; + + phy_data.d0 = desc->v1.phy_data0; + phy_data.d1 = desc->v1.phy_data1; + phy_data.d2 = desc->v1.phy_data2; + phy_data.d3 = desc->v1.phy_data3; } + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) + phy_data.info_type = + le32_get_bits(phy_data.d1, + IWL_RX_PHY_DATA1_INFO_TYPE_MASK); + hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. 
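 */

/*
 * A minimal stand-alone sketch (not part of the patch): both
 * descriptor versions now feed the same iwl_mvm_rx_phy_data, and the
 * PHY info type is pulled out of data word 1 with le32_get_bits(),
 * which derives the shift from the mask at compile time.  Assumes
 * IWL_RX_PHY_DATA1_INFO_TYPE_MASK is a contiguous mask:
 */
#include <linux/bitfield.h>

static u32 example_phy_info_type(__le32 d1)
{
	/* same as FIELD_GET(IWL_RX_PHY_DATA1_INFO_TYPE_MASK, le32_to_cpu(d1)) */
	return le32_get_bits(d1, IWL_RX_PHY_DATA1_INFO_TYPE_MASK);
}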
@@ -1373,7 +1387,10 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } if (rate_n_flags & RATE_MCS_HE_MSK) - iwl_mvm_rx_he(mvm, skb, desc, rate_n_flags, phy_info, queue); + iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, + phy_info, queue); + + iwl_mvm_decode_lsig(skb, &phy_data); rx_status = IEEE80211_SKB_RXCB(skb); @@ -1422,12 +1439,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; - u64 he_phy_data; - - if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) - he_phy_data = le64_to_cpu(desc->v3.he_phy_data); - else - he_phy_data = le64_to_cpu(desc->v1.he_phy_data); rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->ampdu_reference = mvm->ampdu_ref; @@ -1596,6 +1607,129 @@ out: rcu_read_unlock(); } +void iwl_mvm_rx_monitor_ndp(struct iwl_mvm *mvm, struct napi_struct *napi, + struct iwl_rx_cmd_buffer *rxb, int queue) +{ + struct ieee80211_rx_status *rx_status; + struct iwl_rx_packet *pkt = rxb_addr(rxb); + struct iwl_rx_no_data *desc = (void *)pkt->data; + u32 rate_n_flags = le32_to_cpu(desc->rate); + u32 gp2_on_air_rise = le32_to_cpu(desc->on_air_rise_time); + u32 rssi = le32_to_cpu(desc->rssi); + u32 info_type = le32_to_cpu(desc->info) & RX_NO_DATA_INFO_TYPE_MSK; + u16 phy_info = IWL_RX_MPDU_PHY_TSF_OVERLOAD; + struct ieee80211_sta *sta = NULL; + struct sk_buff *skb; + u8 channel, energy_a, energy_b; + struct iwl_mvm_rx_phy_data phy_data = { + .d0 = desc->phy_info[0], + .info_type = IWL_RX_PHY_INFO_TYPE_NONE, + }; + + if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) + return; + + /* Currently only NDP type is supported */ + if (info_type != RX_NO_DATA_INFO_TYPE_NDP) + return; + + energy_a = (rssi & RX_NO_DATA_CHAIN_A_MSK) >> RX_NO_DATA_CHAIN_A_POS; + energy_b = (rssi & RX_NO_DATA_CHAIN_B_MSK) >> RX_NO_DATA_CHAIN_B_POS; + channel = (rssi & RX_NO_DATA_CHANNEL_MSK) >> RX_NO_DATA_CHANNEL_POS; + + phy_data.info_type = + le32_get_bits(desc->phy_info[1], + IWL_RX_PHY_DATA1_INFO_TYPE_MASK); + + /* Dont use dev_alloc_skb(), we'll have enough headroom once + * ieee80211_hdr pulled. + */ + skb = alloc_skb(128, GFP_ATOMIC); + if (!skb) { + IWL_ERR(mvm, "alloc_skb failed\n"); + return; + } + + rx_status = IEEE80211_SKB_RXCB(skb); + + /* 0-length PSDU */ + rx_status->flag |= RX_FLAG_NO_PSDU; + /* currently this is the only type for which we get this notif */ + rx_status->zero_length_psdu_type = + IEEE80211_RADIOTAP_ZERO_LEN_PSDU_SOUNDING; + + /* This may be overridden by iwl_mvm_rx_he() to HE_RU */ + switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { + case RATE_MCS_CHAN_WIDTH_20: + break; + case RATE_MCS_CHAN_WIDTH_40: + rx_status->bw = RATE_INFO_BW_40; + break; + case RATE_MCS_CHAN_WIDTH_80: + rx_status->bw = RATE_INFO_BW_80; + break; + case RATE_MCS_CHAN_WIDTH_160: + rx_status->bw = RATE_INFO_BW_160; + break; + } + + if (rate_n_flags & RATE_MCS_HE_MSK) + iwl_mvm_rx_he(mvm, skb, &phy_data, rate_n_flags, + phy_info, queue); + + iwl_mvm_decode_lsig(skb, &phy_data); + + rx_status->device_timestamp = gp2_on_air_rise; + rx_status->band = channel > 14 ? 
NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ; + rx_status->freq = ieee80211_channel_to_frequency(channel, + rx_status->band); + iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, + energy_b); + + rcu_read_lock(); + + if (!(rate_n_flags & RATE_MCS_CCK_MSK) && + rate_n_flags & RATE_MCS_SGI_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; + if (rate_n_flags & RATE_HT_MCS_GF_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_HT_GF; + if (rate_n_flags & RATE_MCS_LDPC_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_LDPC; + if (rate_n_flags & RATE_MCS_HT_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->encoding = RX_ENC_HT; + rx_status->rate_idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->encoding = RX_ENC_VHT; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_BF_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_BF; + } else if (!(rate_n_flags & RATE_MCS_HE_MSK)) { + int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, + rx_status->band); + + if (WARN(rate < 0 || rate > 0xFF, + "Invalid rate flags 0x%x, band %d,\n", + rate_n_flags, rx_status->band)) { + kfree_skb(skb); + goto out; + } + rx_status->rate_idx = rate; + } + + iwl_mvm_pass_packet_to_mac80211(mvm, napi, skb, queue, sta); +out: + rcu_read_unlock(); +} void iwl_mvm_rx_frame_release(struct iwl_mvm *mvm, struct napi_struct *napi, struct iwl_rx_cmd_buffer *rxb, int queue) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c index cfb784fea77b..86d598d5b68f 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c @@ -205,9 +205,7 @@ iwl_mvm_scan_rate_n_flags(struct iwl_mvm *mvm, enum nl80211_band band, { u32 tx_ant; - mvm->scan_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->scan_last_antenna_idx); + iwl_mvm_toggle_tx_ant(mvm, &mvm->scan_last_antenna_idx); tx_ant = BIT(mvm->scan_last_antenna_idx) << RATE_MCS_ANT_POS; if (band == NL80211_BAND_2GHZ && !no_cck) @@ -1895,6 +1893,8 @@ void iwl_mvm_rx_umac_scan_complete_notif(struct iwl_mvm *mvm, mvm->last_ebs_successful = false; mvm->scan_uid_status[uid] = 0; + + iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_SCAN_COMPLETE); } void iwl_mvm_rx_umac_scan_iter_complete_notif(struct iwl_mvm *mvm, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 1887d2b9f185..e28009832da0 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -319,9 +319,7 @@ static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; - spin_unlock_bh(&mvm->queue_info_lock); rcu_read_lock(); @@ -372,25 +370,17 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, return -EINVAL; if (iwl_mvm_has_new_tx_api(mvm)) { - spin_lock_bh(&mvm->queue_info_lock); - if (remove_mac_queue) mvm->hw_queue_to_mac80211[queue] &= ~BIT(mac80211_queue); - spin_unlock_bh(&mvm->queue_info_lock); - 
iwl_trans_txq_free(mvm->trans, queue); return 0; } - spin_lock_bh(&mvm->queue_info_lock); - - if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) { - spin_unlock_bh(&mvm->queue_info_lock); + if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0)) return 0; - } mvm->queue_info[queue].tid_bitmap &= ~BIT(tid); @@ -426,10 +416,8 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, mvm->hw_queue_to_mac80211[queue]); /* If the queue is still enabled - nothing left to do in this func */ - if (cmd.action == SCD_CFG_ENABLE_QUEUE) { - spin_unlock_bh(&mvm->queue_info_lock); + if (cmd.action == SCD_CFG_ENABLE_QUEUE) return 0; - } cmd.sta_id = mvm->queue_info[queue].ra_sta_id; cmd.tid = mvm->queue_info[queue].txq_tid; @@ -448,8 +436,6 @@ static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, int queue, /* Regardless if this is a reserved TXQ for a STA - mark it as false */ mvm->queue_info[queue].reserved = false; - spin_unlock_bh(&mvm->queue_info_lock); - iwl_trans_txq_disable(mvm->trans, queue, false); ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags, sizeof(struct iwl_scd_txq_cfg_cmd), &cmd); @@ -474,10 +460,8 @@ static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; - spin_unlock_bh(&mvm->queue_info_lock); sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], lockdep_is_held(&mvm->mutex)); @@ -516,10 +500,8 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; - spin_unlock_bh(&mvm->queue_info_lock); rcu_read_lock(); @@ -545,6 +527,16 @@ static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) rcu_read_unlock(); + /* + * The TX path may have been using this TXQ_ID from the tid_data, + * so make sure it's no longer running so that we can safely reuse + * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE + * above, but nothing guarantees we've stopped using them. Thus, + * without this, we could get to iwl_mvm_disable_txq() and remove + * the queue while still sending frames to it. + */ + synchronize_net(); + return disable_agg_tids; } @@ -562,11 +554,9 @@ static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue, if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - spin_lock_bh(&mvm->queue_info_lock); txq_curr_ac = mvm->queue_info[queue].mac80211_ac; sta_id = mvm->queue_info[queue].ra_sta_id; tid = mvm->queue_info[queue].txq_tid; - spin_unlock_bh(&mvm->queue_info_lock); same_sta = sta_id == new_sta_id; @@ -610,7 +600,6 @@ static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, * by the inactivity checker. */ lockdep_assert_held(&mvm->mutex); - lockdep_assert_held(&mvm->queue_info_lock); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; @@ -696,10 +685,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, * value 3 and VO with value 0, so to check if ac X is lower than ac Y * we need to check if the numerical value of X is LARGER than of Y. 
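 */

/*
 * A hypothetical helper (not in the patch) illustrating the point of
 * the comment above: mac80211 numbers the ACs VO=0, VI=1, BE=2, BK=3,
 * so "AC X is lower priority than AC Y" means a numerically larger
 * value, hence the "ac <=" comparison in the check below:
 */
static inline bool example_ac_lower_than(u8 ac_x, u8 ac_y)
{
	return ac_x > ac_y;	/* e.g. BK (3) is lower priority than VO (0) */
}

/*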
*/ - spin_lock_bh(&mvm->queue_info_lock); if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { - spin_unlock_bh(&mvm->queue_info_lock); - IWL_DEBUG_TX_QUEUES(mvm, "No redirection needed on TXQ #%d\n", queue); @@ -711,7 +697,6 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.tid = mvm->queue_info[queue].txq_tid; mq = mvm->hw_queue_to_mac80211[queue]; shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1; - spin_unlock_bh(&mvm->queue_info_lock); IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n", queue, iwl_mvm_ac_to_tx_fifo[ac]); @@ -737,9 +722,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout); /* Update the TID "owner" of the queue */ - spin_lock_bh(&mvm->queue_info_lock); mvm->queue_info[queue].txq_tid = tid; - spin_unlock_bh(&mvm->queue_info_lock); /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ @@ -748,9 +731,7 @@ static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn); /* Update AC marking of the queue */ - spin_lock_bh(&mvm->queue_info_lock); mvm->queue_info[queue].mac80211_ac = ac; - spin_unlock_bh(&mvm->queue_info_lock); /* * Mark queue as shared in transport if shared @@ -773,7 +754,7 @@ static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id, { int i; - lockdep_assert_held(&mvm->queue_info_lock); + lockdep_assert_held(&mvm->mutex); /* This should not be hit with new TX path */ if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) @@ -853,11 +834,8 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, { bool enable_queue = true; - spin_lock_bh(&mvm->queue_info_lock); - /* Make sure this TID isn't already enabled */ if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) { - spin_unlock_bh(&mvm->queue_info_lock); IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n", queue, tid); return false; @@ -893,8 +871,6 @@ static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm, int queue, queue, mvm->queue_info[queue].tid_bitmap, mvm->hw_queue_to_mac80211[queue]); - spin_unlock_bh(&mvm->queue_info_lock); - return enable_queue; } @@ -949,9 +925,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return; - spin_lock_bh(&mvm->queue_info_lock); tid_bitmap = mvm->queue_info[queue].tid_bitmap; - spin_unlock_bh(&mvm->queue_info_lock); if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue)) return; @@ -968,9 +942,7 @@ static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue) return; } - spin_lock_bh(&mvm->queue_info_lock); mvm->queue_info[queue].txq_tid = tid; - spin_unlock_bh(&mvm->queue_info_lock); IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n", queue, tid); } @@ -992,10 +964,8 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) lockdep_assert_held(&mvm->mutex); - spin_lock_bh(&mvm->queue_info_lock); sta_id = mvm->queue_info[queue].ra_sta_id; tid_bitmap = mvm->queue_info[queue].tid_bitmap; - spin_unlock_bh(&mvm->queue_info_lock); /* Find TID for queue, and make sure it is the only one on the queue */ tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1); @@ -1052,9 +1022,7 @@ static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue) } } - spin_lock_bh(&mvm->queue_info_lock); mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; - spin_unlock_bh(&mvm->queue_info_lock); } /* @@ -1073,7 +1041,7 @@ static 
bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm, int tid; lockdep_assert_held(&mvmsta->lock); - lockdep_assert_held(&mvm->queue_info_lock); + lockdep_assert_held(&mvm->mutex); if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return false; @@ -1174,8 +1142,6 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) if (iwl_mvm_has_new_tx_api(mvm)) return -ENOSPC; - spin_lock_bh(&mvm->queue_info_lock); - rcu_read_lock(); /* we skip the CMD queue below by starting at 1 */ @@ -1230,12 +1196,7 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) mvmsta = iwl_mvm_sta_from_mac80211(sta); - /* this isn't so nice, but works OK due to the way we loop */ - spin_unlock(&mvm->queue_info_lock); - - /* and we need this locking order */ - spin_lock(&mvmsta->lock); - spin_lock(&mvm->queue_info_lock); + spin_lock_bh(&mvmsta->lock); ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i, inactive_tid_bitmap, &unshare_queues, @@ -1243,11 +1204,10 @@ static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta) if (ret >= 0 && free_queue < 0) free_queue = ret; /* only unlock sta lock - we still need the queue info lock */ - spin_unlock(&mvmsta->lock); + spin_unlock_bh(&mvmsta->lock); } rcu_read_unlock(); - spin_unlock_bh(&mvm->queue_info_lock); /* Reconfigure queues requiring reconfiguation */ for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES) @@ -1294,10 +1254,9 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, spin_lock_bh(&mvmsta->lock); tfd_queue_mask = mvmsta->tfd_queue_msk; + ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number); spin_unlock_bh(&mvmsta->lock); - spin_lock_bh(&mvm->queue_info_lock); - /* * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one * exists @@ -1327,12 +1286,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { - spin_unlock_bh(&mvm->queue_info_lock); - /* try harder - perhaps kill an inactive queue */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); - - spin_lock_bh(&mvm->queue_info_lock); } /* No free queue - we'll have to share */ @@ -1353,8 +1308,6 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, if (queue > 0 && !shared_queue) mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; - spin_unlock_bh(&mvm->queue_info_lock); - /* This shouldn't happen - out of queues */ if (WARN_ON(queue <= 0)) { IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", @@ -1388,13 +1341,8 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, } } - ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); inc_ssn = iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, wdg_timeout); - if (inc_ssn) { - ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; - le16_add_cpu(&hdr->seq_ctrl, 0x10); - } /* * Mark queue as shared in transport if shared @@ -1411,8 +1359,10 @@ static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, * this ra/tid in our Tx path since we stop the Qdisc when we * need to allocate a new TFD queue. 
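 */

/*
 * Illustrative values (not in the patch) for the seq_ctrl arithmetic
 * used around this hunk: the 802.11 sequence-control field keeps the
 * fragment number in bits 0-3 and the sequence number in bits 4-15,
 * so advancing the SN by one adds 0x10 to seq_number:
 */
static void example_seq_step(void)
{
	u16 seq_ctrl = 0x0125;	/* SN 0x012, fragment 5 */

	seq_ctrl += 0x10;	/* SN 0x013, fragment still 5 */
	WARN_ON(IEEE80211_SEQ_TO_SN(seq_ctrl) != 0x013);
}

/*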
*/ - if (inc_ssn) + if (inc_ssn) { mvmsta->tid_data[tid].seq_number += 0x10; + ssn = (ssn + 1) & IEEE80211_SCTL_SEQ; + } mvmsta->tid_data[tid].txq_id = queue; mvmsta->tfd_queue_msk |= BIT(queue); queue_state = mvmsta->tid_data[tid].state; @@ -1556,8 +1506,6 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, /* run the general cleanup/unsharing of queues */ iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA); - spin_lock_bh(&mvm->queue_info_lock); - /* Make sure we have free resources for this STA */ if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap && @@ -1569,19 +1517,15 @@ static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, IWL_MVM_DQA_MIN_DATA_QUEUE, IWL_MVM_DQA_MAX_DATA_QUEUE); if (queue < 0) { - spin_unlock_bh(&mvm->queue_info_lock); /* try again - this time kick out a queue if needed */ queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id); if (queue < 0) { IWL_ERR(mvm, "No available queues for new station\n"); return -ENOSPC; } - spin_lock_bh(&mvm->queue_info_lock); } mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; - spin_unlock_bh(&mvm->queue_info_lock); - mvmsta->reserved_queue = queue; IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", @@ -1822,6 +1766,8 @@ int iwl_mvm_add_sta(struct iwl_mvm *mvm, if (iwl_mvm_has_tlc_offload(mvm)) iwl_mvm_rs_add_sta(mvm, mvm_sta); + iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant); + update_fw: ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags); if (ret) @@ -2004,18 +1950,14 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm, * is still marked as IWL_MVM_QUEUE_RESERVED, and * should be manually marked as free again */ - spin_lock_bh(&mvm->queue_info_lock); status = &mvm->queue_info[reserved_txq].status; if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && (*status != IWL_MVM_QUEUE_FREE), "sta_id %d reserved txq %d status %d", - sta_id, reserved_txq, *status)) { - spin_unlock_bh(&mvm->queue_info_lock); + sta_id, reserved_txq, *status)) return -EINVAL; - } *status = IWL_MVM_QUEUE_FREE; - spin_unlock_bh(&mvm->queue_info_lock); } if (vif->type == NL80211_IFTYPE_STATION && @@ -2873,8 +2815,6 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, return -EIO; } - spin_lock(&mvm->queue_info_lock); - /* * Note the possible cases: * 1. An enabled TXQ - TXQ needs to become agg'ed @@ -2889,7 +2829,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (txq_id < 0) { ret = txq_id; IWL_ERR(mvm, "Failed to allocate agg queue\n"); - goto release_locks; + goto out; } /* TXQ hasn't yet been enabled, so mark it only as reserved */ @@ -2900,11 +2840,9 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, IWL_DEBUG_TX_QUEUES(mvm, "Can't start tid %d agg on shared queue!\n", tid); - goto release_locks; + goto out; } - spin_unlock(&mvm->queue_info_lock); - IWL_DEBUG_TX_QUEUES(mvm, "AGG for tid %d will be on queue #%d\n", tid, txq_id); @@ -2935,10 +2873,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } ret = 0; - goto out; -release_locks: - spin_unlock(&mvm->queue_info_lock); out: spin_unlock_bh(&mvmsta->lock); @@ -3007,9 +2942,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; - spin_lock_bh(&mvm->queue_info_lock); queue_status = mvm->queue_info[queue].status; - spin_unlock_bh(&mvm->queue_info_lock); /* Maybe there is no need to even alloc a queue... 
*/ if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY) @@ -3055,9 +2988,7 @@ int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, } /* No need to mark as reserved */ - spin_lock_bh(&mvm->queue_info_lock); mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; - spin_unlock_bh(&mvm->queue_info_lock); out: /* @@ -3083,10 +3014,11 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, { u16 txq_id = tid_data->txq_id; + lockdep_assert_held(&mvm->mutex); + if (iwl_mvm_has_new_tx_api(mvm)) return; - spin_lock_bh(&mvm->queue_info_lock); /* * The TXQ is marked as reserved only if no traffic came through yet * This means no traffic has been sent on this TID (agg'd or not), so @@ -3098,8 +3030,6 @@ static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; tid_data->txq_id = IWL_MVM_INVALID_QUEUE; } - - spin_unlock_bh(&mvm->queue_info_lock); } int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index de1a0a2d8723..d52cd888f77d 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -397,6 +397,9 @@ struct iwl_mvm_rxq_dup_data { * @ptk_pn: per-queue PTK PN data structures * @dup_data: per queue duplicate packet detection data * @deferred_traffic_tid_map: indication bitmap of deferred traffic per-TID + * @tx_ant: the index of the antenna to use for data tx to this station. Only + * used during connection establishment (e.g. for the 4 way handshake + * exchange). * * When mac80211 creates a station it reserves some space (hw->sta_data_size) * in the structure for use by driver. This structure is placed in that @@ -439,6 +442,7 @@ struct iwl_mvm_sta { u8 agg_tids; u8 sleep_tx_count; u8 avg_energy; + u8 tx_ant; }; u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index ec57682efe54..995fe2a6abbb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -302,13 +302,30 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, offload_assist)); } +static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) +{ + if (info->band == NL80211_BAND_2GHZ && + !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) + return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; + + if (sta && ieee80211_is_data(fc)) { + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS; + } + + return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; +} + static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, struct ieee80211_tx_info *info, struct ieee80211_sta *sta) { int rate_idx; u8 rate_plcp; - u32 rate_flags; + u32 rate_flags = 0; /* HT rate doesn't make sense for a non data frame */ WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS, @@ -332,13 +349,6 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, /* Get PLCP rate for tx_cmd->rate_n_flags */ rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(rate_idx); - if (info->band == NL80211_BAND_2GHZ && - !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) - rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS; - else - rate_flags = - BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; - /* Set CCK flag as needed 
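 */

/*
 * A sketch (not part of the patch) of the antenna plumbing used above:
 * rate_n_flags carries the antenna as a one-hot bitmap shifted to
 * RATE_MCS_ANT_POS, and the new iwl_mvm_toggle_tx_ant() used by this
 * series is presumably a thin wrapper over iwl_mvm_next_antenna():
 */
static u32 example_ant_bits(u8 ant_idx)
{
	return BIT(ant_idx) << RATE_MCS_ANT_POS; /* idx 1 -> RATE_MCS_ANT_B_MSK */
}

static inline void example_toggle_tx_ant(struct iwl_mvm *mvm, u8 *ant)
{
	*ant = iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), *ant);
}

/*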
*/ if ((rate_idx >= IWL_FIRST_CCK_RATE) && (rate_idx <= IWL_LAST_CCK_RATE)) rate_flags |= RATE_MCS_CCK_MSK; @@ -346,6 +356,14 @@ static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm, return (u32)rate_plcp | rate_flags; } +static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm, + struct ieee80211_tx_info *info, + struct ieee80211_sta *sta, __le16 fc) +{ + return iwl_mvm_get_tx_rate(mvm, info, sta) | + iwl_mvm_get_tx_ant(mvm, info, sta, fc); +} + /* * Sets the fields in the Tx cmd that are rate related */ @@ -373,20 +391,21 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd, */ if (ieee80211_is_data(fc) && sta) { - tx_cmd->initial_rate_index = 0; - tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); - return; + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); + + if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) { + tx_cmd->initial_rate_index = 0; + tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); + return; + } } else if (ieee80211_is_back_req(fc)) { tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR); } - mvm->mgmt_last_antenna_idx = - iwl_mvm_next_antenna(mvm, iwl_mvm_get_valid_tx_ant(mvm), - mvm->mgmt_last_antenna_idx); - /* Set the rate in the TX cmd */ - tx_cmd->rate_n_flags = cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); + tx_cmd->rate_n_flags = + cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc)); } static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info, @@ -491,6 +510,8 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, u16 offload_assist = 0; u32 rate_n_flags = 0; u16 flags = 0; + struct iwl_mvm_sta *mvmsta = sta ? + iwl_mvm_sta_from_mac80211(sta) : NULL; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); @@ -510,10 +531,16 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, if (!info->control.hw_key) flags |= IWL_TX_FLAGS_ENCRYPT_DIS; - /* For data packets rate info comes from the fw */ - if (!(ieee80211_is_data(hdr->frame_control) && sta)) { + /* + * For data packets rate info comes from the fw. Only + * set rate/antenna during connection establishment. + */ + if (sta && (!ieee80211_is_data(hdr->frame_control) || + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)) { flags |= IWL_TX_FLAGS_CMD_RATE; - rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta); + rate_n_flags = + iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, + hdr->frame_control); } if (mvm->trans->cfg->device_family >= @@ -681,22 +708,12 @@ out: int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; - struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_info info; struct iwl_device_cmd *dev_cmd; u8 sta_id; int hdrlen = ieee80211_hdrlen(hdr->frame_control); __le16 fc = hdr->frame_control; - int queue; - - /* IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets that can be used - * in 2 different types of vifs, P2P & STATION. P2P uses the offchannel - * queue. 
STATION (HS2.0) uses the auxiliary context of the FW, - * and hence needs to be sent on the aux queue - */ - if (skb_info->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && - skb_info->control.vif->type == NL80211_IFTYPE_STATION) - skb_info->hw_queue = mvm->aux_queue; + int queue = -1; memcpy(&info, skb->cb, sizeof(info)); @@ -708,18 +725,6 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) info.hw_queue != info.control.vif->cab_queue))) return -1; - queue = info.hw_queue; - - /* - * If the interface on which the frame is sent is the P2P_DEVICE - * or an AP/GO interface use the broadcast station associated - * with it; otherwise if the interface is a managed interface - * use the AP station associated with it for multicast traffic - * (this is not possible for unicast packets as a TLDS discovery - * response are sent without a station entry); otherwise use the - * AUX station. - */ - sta_id = mvm->aux_sta.sta_id; if (info.control.vif) { struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(info.control.vif); @@ -734,20 +739,28 @@ int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb) queue = iwl_mvm_get_ctrl_vif_queue(mvm, &info, hdr->frame_control); - if (queue < 0) - return -1; - } else if (info.control.vif->type == NL80211_IFTYPE_STATION && - is_multicast_ether_addr(hdr->addr1)) { - u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); - if (ap_sta_id != IWL_MVM_INVALID_STA) - sta_id = ap_sta_id; } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) { queue = mvm->snif_queue; sta_id = mvm->snif_sta.sta_id; + } else if (info.control.vif->type == NL80211_IFTYPE_STATION && + info.hw_queue == IWL_MVM_OFFCHANNEL_QUEUE) { + /* + * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets + * that can be used in 2 different types of vifs, P2P & + * STATION. + * P2P uses the offchannel queue. + * STATION (HS2.0) uses the auxiliary context of the FW, + * and hence needs to be sent on the aux queue. + */ + sta_id = mvm->aux_sta.sta_id; + queue = mvm->aux_queue; } } + if (queue < 0) + return -1; + if (unlikely(ieee80211_is_probe_resp(fc))) iwl_mvm_probe_resp_set_noa(mvm, skb); @@ -1160,11 +1173,11 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb, * If we have timed-out TIDs - schedule the worker that will * reconfig the queues and update them * - * Note that the mvm->queue_info_lock isn't being taken here in - * order to not serialize the TX flow. This isn't dangerous - * because scheduling mvm->add_stream_wk can't ruin the state, - * and if we DON'T schedule it due to some race condition then - * next TX we get here we will. + * Note that no lock is taken here in order to not serialize + * the TX flow. This isn't dangerous because scheduling + * mvm->add_stream_wk can't ruin the state, and if we DON'T + * schedule it due to some race condition then next TX we get + * here we will.
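 */

/*
 * A generic illustration (hypothetical names) of the pattern relied on
 * above: racy read, idempotent action -- a stale read at worst delays
 * the worker until the next TX:
 */
static void example_maybe_kick(struct work_struct *wk, bool timed_out)
{
	if (timed_out)			/* unlocked, possibly stale read */
		schedule_work(wk);	/* double-scheduling is harmless */
}

/*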
*/ if (unlikely(mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_SHARED && @@ -1451,7 +1464,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, iwl_mvm_get_agg_status(mvm, tx_resp); u32 status = le16_to_cpu(agg_status->status); u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp); - struct iwl_mvm_sta *mvmsta; struct sk_buff_head skbs; u8 skb_freed = 0; u8 lq_color; @@ -1501,6 +1513,10 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, break; } + if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && + ieee80211_is_mgmt(hdr->frame_control)) + iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx); + /* * If we are freeing multiple frames, mark all the frames * but the first one as acked, since they were acknowledged @@ -1595,11 +1611,15 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, goto out; if (!IS_ERR(sta)) { - mvmsta = iwl_mvm_sta_from_mac80211(sta); + struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); iwl_mvm_tx_airtime(mvm, mvmsta, le16_to_cpu(tx_resp->wireless_media_time)); + if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS && + mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) + iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant); + if (sta->wme && tid != IWL_MGMT_TID) { struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; @@ -1654,10 +1674,7 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm, mvmsta->next_status_eosp = false; ieee80211_sta_eosp(sta); } - } else { - mvmsta = NULL; } - out: rcu_read_unlock(); } @@ -1800,8 +1817,6 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, return; } - spin_lock_bh(&mvmsta->lock); - __skb_queue_head_init(&reclaimed_skbs); /* @@ -1811,6 +1826,8 @@ static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid, */ iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs); + spin_lock_bh(&mvmsta->lock); + tid_data->next_reclaimed = index; iwl_mvm_check_ratid_empty(mvm, sta, tid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c index 818e1180bbdd..d116c6ae18ff 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/utils.c @@ -285,6 +285,7 @@ u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx) return last_idx; } +#define FW_SYSASSERT_CPU_MASK 0xf0000000 static const struct { const char *name; u8 num; @@ -301,6 +302,9 @@ static const struct { { "NMI_INTERRUPT_WDG_RXF_FULL", 0x5C }, { "NMI_INTERRUPT_WDG_NO_RBD_RXF_FULL", 0x64 }, { "NMI_INTERRUPT_HOST", 0x66 }, + { "NMI_INTERRUPT_LMAC_FATAL", 0x70 }, + { "NMI_INTERRUPT_UMAC_FATAL", 0x71 }, + { "NMI_INTERRUPT_OTHER_LMAC_FATAL", 0x73 }, { "NMI_INTERRUPT_ACTION_PT", 0x7C }, { "NMI_INTERRUPT_UNKNOWN", 0x84 }, { "NMI_INTERRUPT_INST_ACTION_PT", 0x86 }, @@ -312,7 +316,7 @@ static const char *desc_lookup(u32 num) int i; for (i = 0; i < ARRAY_SIZE(advanced_lookup) - 1; i++) - if (advanced_lookup[i].num == num) + if (advanced_lookup[i].num == (num & ~FW_SYSASSERT_CPU_MASK)) return advanced_lookup[i].name; /* No entry matches 'num', so it is the last: ADVANCED_SYSASSERT */ @@ -536,6 +540,9 @@ static void iwl_mvm_dump_lmac_error_log(struct iwl_mvm *mvm, u32 base) iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table)); + if (table.valid) + mvm->fwrt.dump.rt_status = table.error_id; + if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) { IWL_ERR(trans, "Start IWL Error Log Dump:\n"); IWL_ERR(trans, "Status: 0x%08lX, count: %d\n", @@ -618,13 +625,9 @@ int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, 
int fifo, int sta_id, if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) return -EINVAL; - spin_lock_bh(&mvm->queue_info_lock); if (WARN(mvm->queue_info[queue].tid_bitmap == 0, - "Trying to reconfig unallocated queue %d\n", queue)) { - spin_unlock_bh(&mvm->queue_info_lock); + "Trying to reconfig unallocated queue %d\n", queue)) return -ENXIO; - } - spin_unlock_bh(&mvm->queue_info_lock); IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue); @@ -768,6 +771,29 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) return result; } +void iwl_mvm_send_low_latency_cmd(struct iwl_mvm *mvm, + bool low_latency, u16 mac_id) +{ + struct iwl_mac_low_latency_cmd cmd = { + .mac_id = cpu_to_le32(mac_id) + }; + + if (!fw_has_capa(&mvm->fw->ucode_capa, + IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) + return; + + if (low_latency) { + /* currently we don't care about the direction */ + cmd.low_latency_rx = 1; + cmd.low_latency_tx = 1; + } + + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(LOW_LATENCY_CMD, + MAC_CONF_GROUP, 0), + 0, sizeof(cmd), &cmd)) + IWL_ERR(mvm, "Failed to send low latency command\n"); +} + int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, bool low_latency, enum iwl_mvm_low_latency_cause cause) @@ -786,24 +812,7 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif, if (low_latency == prev) return 0; - if (fw_has_capa(&mvm->fw->ucode_capa, - IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA)) { - struct iwl_mac_low_latency_cmd cmd = { - .mac_id = cpu_to_le32(mvmvif->id) - }; - - if (low_latency) { - /* currently we don't care about the direction */ - cmd.low_latency_rx = 1; - cmd.low_latency_tx = 1; - } - res = iwl_mvm_send_cmd_pdu(mvm, - iwl_cmd_id(LOW_LATENCY_CMD, - MAC_CONF_GROUP, 0), - 0, sizeof(cmd), &cmd); - if (res) - IWL_ERR(mvm, "Failed to send low latency command\n"); - } + iwl_mvm_send_low_latency_cmd(mvm, low_latency, mvmvif->id); res = iwl_mvm_update_quotas(mvm, false, NULL); if (res) @@ -1372,6 +1381,7 @@ void iwl_mvm_pause_tcm(struct iwl_mvm *mvm, bool with_cancel) void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) { int mac; + bool low_latency = false; spin_lock_bh(&mvm->tcm.lock); mvm->tcm.ts = jiffies; @@ -1383,10 +1393,23 @@ void iwl_mvm_resume_tcm(struct iwl_mvm *mvm) memset(&mdata->tx.pkts, 0, sizeof(mdata->tx.pkts)); memset(&mdata->rx.airtime, 0, sizeof(mdata->rx.airtime)); memset(&mdata->tx.airtime, 0, sizeof(mdata->tx.airtime)); + + if (mvm->tcm.result.low_latency[mac]) + low_latency = true; } /* The TCM data needs to be reset before "paused" flag changes */ smp_mb(); mvm->tcm.paused = false; + + /* + * if the current load is not low or low latency is active, force + * re-evaluation to cover the case of no traffic. 
+ */ + if (mvm->tcm.result.global_load > IWL_MVM_TRAFFIC_LOW) + schedule_delayed_work(&mvm->tcm.work, MVM_TCM_PERIOD); + else if (low_latency) + schedule_delayed_work(&mvm->tcm.work, MVM_LL_PERIOD); + spin_unlock_bh(&mvm->tcm.lock); } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c index 05ed4fb88e0c..ceb3aa03d561 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -94,11 +94,14 @@ int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, cpu_to_le64(trans_pcie->rxq->bd_dma); /* Configure debug, for integration */ - iwl_pcie_alloc_fw_monitor(trans, 0); - prph_sc_ctrl->hwm_cfg.hwm_base_addr = - cpu_to_le64(trans->fw_mon[0].physical); - prph_sc_ctrl->hwm_cfg.hwm_size = - cpu_to_le32(trans->fw_mon[0].size); + if (!trans->ini_valid) + iwl_pcie_alloc_fw_monitor(trans, 0); + if (trans->num_blocks) { + prph_sc_ctrl->hwm_cfg.hwm_base_addr = + cpu_to_le64(trans->fw_mon[0].physical); + prph_sc_ctrl->hwm_cfg.hwm_size = + cpu_to_le32(trans->fw_mon[0].size); + } /* allocate ucode sections in dram and set addresses */ ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 6f45a0303ddd..7f4aaa810ea1 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -227,7 +227,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, iwl_enable_interrupts(trans); /* Configure debug, if exists */ - if (trans->dbg_dest_tlv) + if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); /* kick FW self load */ diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 9e015212c2c0..353581ccc01e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -513,6 +513,56 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x24FD, 0x9074, iwl8265_2ac_cfg)}, /* 9000 Series */ + {IWL_PCI_DEVICE(0x02F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x02F0, 0x4234, iwl9560_2ac_cfg_soc)}, + 
{IWL_PCI_DEVICE(0x02F0, 0x42A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0038, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x003C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0060, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0064, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x00A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x00A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0230, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0238, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x023C, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0260, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x0264, iwl9461_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x02A0, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x02A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x1551, iwl9560_killer_s_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x1552, iwl9560_killer_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x2034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4030, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4034, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x40A4, iwl9462_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x4234, iwl9560_2ac_cfg_soc)}, + {IWL_PCI_DEVICE(0x06F0, 0x42A4, iwl9462_2ac_cfg_soc)}, {IWL_PCI_DEVICE(0x2526, 0x0010, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0014, iwl9260_2ac_cfg)}, {IWL_PCI_DEVICE(0x2526, 0x0018, iwl9260_2ac_cfg)}, @@ -832,7 +882,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)}, {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)}, diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index f9c4c64dee66..d6fc6ce73e0a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -378,6 +378,23 @@ struct iwl_tso_hdr_page { u8 *pos; }; +#ifdef CONFIG_IWLWIFI_DEBUGFS +/** + * enum iwl_fw_mon_dbgfs_state - the different states of the monitor_data + * debugfs file + * + * @IWL_FW_MON_DBGFS_STATE_CLOSED: the file is closed. + * @IWL_FW_MON_DBGFS_STATE_OPEN: the file is open. + * @IWL_FW_MON_DBGFS_STATE_DISABLED: the file is disabled, once this state is + * set the file can no longer be used. + */ +enum iwl_fw_mon_dbgfs_state { + IWL_FW_MON_DBGFS_STATE_CLOSED, + IWL_FW_MON_DBGFS_STATE_OPEN, + IWL_FW_MON_DBGFS_STATE_DISABLED, +}; +#endif + /** * enum iwl_shared_irq_flags - level of sharing for irq * @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non rx causes. 
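 */

/*
 * A condensed sketch (hypothetical helper) of how the
 * iwl_fw_mon_dbgfs_state enum above gates the monitor_data debugfs
 * file, matching the open handler added later in this patch: only a
 * CLOSED file may be opened, and DISABLED is terminal:
 */
static int example_mon_try_open(u8 *state)
{
	if (*state != IWL_FW_MON_DBGFS_STATE_CLOSED)
		return -EBUSY;	/* already open, or disabled for good */

	*state = IWL_FW_MON_DBGFS_STATE_OPEN;
	return 0;
}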
@@ -415,6 +432,26 @@ struct iwl_self_init_dram { }; /** + * struct cont_rec: continuous recording data structure + * @prev_wr_ptr: the last address that was read in monitor_data + * debugfs file + * @prev_wrap_cnt: the wrap count that was used during the last read in + * monitor_data debugfs file + * @state: the state of monitor_data debugfs file as described + * in &iwl_fw_mon_dbgfs_state enum + * @mutex: locked while reading from monitor_data debugfs file + */ +#ifdef CONFIG_IWLWIFI_DEBUGFS +struct cont_rec { + u32 prev_wr_ptr; + u32 prev_wrap_cnt; + u8 state; + /* Used to sync monitor_data debugfs file with driver unload flow */ + struct mutex mutex; +}; +#endif + +/** * struct iwl_trans_pcie - PCIe transport specific data * @rxq: all the RX queue data * @rx_pool: initial pool of iwl_rx_mem_buffer for all the queues @@ -451,6 +488,9 @@ struct iwl_self_init_dram { * @reg_lock: protect hw register access * @mutex: to protect stop_device / start_fw / start_hw * @cmd_in_flight: true when we have a host command in flight +#ifdef CONFIG_IWLWIFI_DEBUGFS + * @fw_mon_data: fw continuous recording data +#endif * @msix_entries: array of MSI-X entries * @msix_enabled: true if managed to enable MSI-X * @shared_vec_mask: the type of causes the shared vector handles @@ -538,6 +578,10 @@ struct iwl_trans_pcie { bool cmd_hold_nic_awake; bool ref_cmd_in_flight; +#ifdef CONFIG_IWLWIFI_DEBUGFS + struct cont_rec fw_mon_data; +#endif + struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES]; bool msix_enabled; u8 shared_vec_mask; @@ -965,6 +1009,11 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans, __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask); } +static inline bool iwl_pcie_dbg_on(struct iwl_trans *trans) +{ + return (trans->dbg_dest_tlv || trans->ini_valid); +} + void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state); void iwl_trans_pcie_dump_regs(struct iwl_trans *trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 5bafb3f46eb8..f97aea5ffc44 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -71,6 +71,7 @@ #include <linux/vmalloc.h> #include <linux/pm_runtime.h> #include <linux/module.h> +#include <linux/wait.h> #include "iwl-drv.h" #include "iwl-trans.h" @@ -923,6 +924,20 @@ void iwl_pcie_apply_destination(struct iwl_trans *trans) const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg_dest_tlv; int i; + if (trans->ini_valid) { + if (!trans->num_blocks) + return; + + iwl_write_prph(trans, MON_BUFF_BASE_ADDR_VER2, + trans->fw_mon[0].physical >> + MON_BUFF_SHIFT_VER2); + iwl_write_prph(trans, MON_BUFF_END_ADDR_VER2, + (trans->fw_mon[0].physical + + trans->fw_mon[0].size - 256) >> + MON_BUFF_SHIFT_VER2); + return; + } + IWL_INFO(trans, "Applying debug destination %s\n", get_fw_dbg_mode_string(dest->monitor_mode)); @@ -1025,7 +1040,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans, (trans->fw_mon[0].physical + trans->fw_mon[0].size) >> 4); } - } else if (trans->dbg_dest_tlv) { + } else if (iwl_pcie_dbg_on(trans)) { iwl_pcie_apply_destination(trans); } @@ -1046,7 +1061,7 @@ static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans, IWL_DEBUG_FW(trans, "working with %s CPU\n", image->is_dual_cpus ? 
"Dual" : "Single"); - if (trans->dbg_dest_tlv) + if (iwl_pcie_dbg_on(trans)) iwl_pcie_apply_destination(trans); IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n", @@ -1729,6 +1744,7 @@ static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u32 hpm; int err; lockdep_assert_held(&trans_pcie->mutex); @@ -1739,6 +1755,17 @@ static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power) return err; } + hpm = iwl_trans_read_prph(trans, HPM_DEBUG); + if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) { + if (iwl_trans_read_prph(trans, PREG_PRPH_WPROT_0) & + PREG_WFPM_ACCESS) { + IWL_ERR(trans, + "Error, can not clear persistence bit\n"); + return -EPERM; + } + iwl_trans_write_prph(trans, HPM_DEBUG, hpm & ~PERSISTENCE_BIT); + } + iwl_trans_pcie_sw_reset(trans); err = iwl_pcie_apm_init(trans); @@ -2697,6 +2724,137 @@ static ssize_t iwl_dbgfs_rfkill_write(struct file *file, return count; } +static int iwl_dbgfs_monitor_data_open(struct inode *inode, + struct file *file) +{ + struct iwl_trans *trans = inode->i_private; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans->dbg_dest_tlv || + trans->dbg_dest_tlv->monitor_mode != EXTERNAL_MODE) { + IWL_ERR(trans, "Debug destination is not set to DRAM\n"); + return -ENOENT; + } + + if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED) + return -EBUSY; + + trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN; + return simple_open(inode, file); +} + +static int iwl_dbgfs_monitor_data_release(struct inode *inode, + struct file *file) +{ + struct iwl_trans_pcie *trans_pcie = + IWL_TRANS_GET_PCIE_TRANS(inode->i_private); + + if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN) + trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; + return 0; +} + +static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count, + void *buf, ssize_t *size, + ssize_t *bytes_copied) +{ + int buf_size_left = count - *bytes_copied; + + buf_size_left = buf_size_left - (buf_size_left % sizeof(u32)); + if (*size > buf_size_left) + *size = buf_size_left; + + *size -= copy_to_user(user_buf, buf, *size); + *bytes_copied += *size; + + if (buf_size_left == *size) + return true; + return false; +} + +static ssize_t iwl_dbgfs_monitor_data_read(struct file *file, + char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct iwl_trans *trans = file->private_data; + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + void *cpu_addr = (void *)trans->fw_mon[0].block, *curr_buf; + struct cont_rec *data = &trans_pcie->fw_mon_data; + u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt; + ssize_t size, bytes_copied = 0; + bool b_full; + + if (trans->dbg_dest_tlv) { + write_ptr_addr = + le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt_addr = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + } else { + write_ptr_addr = MON_BUFF_WRPTR; + wrap_cnt_addr = MON_BUFF_CYCLE_CNT; + } + + if (unlikely(!trans->dbg_rec_on)) + return 0; + + mutex_lock(&data->mutex); + if (data->state == + IWL_FW_MON_DBGFS_STATE_DISABLED) { + mutex_unlock(&data->mutex); + return 0; + } + + /* write_ptr position in bytes rather then DW */ + write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32); + wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr); + + if (data->prev_wrap_cnt == wrap_cnt) { + size = write_ptr - data->prev_wr_ptr; + 
curr_buf = cpu_addr + data->prev_wr_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + curr_buf, &size, + &bytes_copied); + data->prev_wr_ptr += size; + + } else if (data->prev_wrap_cnt == wrap_cnt - 1 && + write_ptr < data->prev_wr_ptr) { + size = trans->fw_mon[0].size - data->prev_wr_ptr; + curr_buf = cpu_addr + data->prev_wr_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + curr_buf, &size, + &bytes_copied); + data->prev_wr_ptr += size; + + if (!b_full) { + size = write_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + cpu_addr, &size, + &bytes_copied); + data->prev_wr_ptr = size; + data->prev_wrap_cnt++; + } + } else { + if (data->prev_wrap_cnt == wrap_cnt - 1 && + write_ptr > data->prev_wr_ptr) + IWL_WARN(trans, + "write pointer passed previous write pointer, start copying from the beginning\n"); + else if (!unlikely(data->prev_wrap_cnt == 0 && + data->prev_wr_ptr == 0)) + IWL_WARN(trans, + "monitor data is out of sync, start copying from the beginning\n"); + + size = write_ptr; + b_full = iwl_write_to_user_buf(user_buf, count, + cpu_addr, &size, + &bytes_copied); + data->prev_wr_ptr = size; + data->prev_wrap_cnt = wrap_cnt; + } + + mutex_unlock(&data->mutex); + + return bytes_copied; +} + DEBUGFS_READ_WRITE_FILE_OPS(interrupt); DEBUGFS_READ_FILE_OPS(fh_reg); DEBUGFS_READ_FILE_OPS(rx_queue); @@ -2704,6 +2862,12 @@ DEBUGFS_READ_FILE_OPS(tx_queue); DEBUGFS_WRITE_FILE_OPS(csr); DEBUGFS_READ_WRITE_FILE_OPS(rfkill); +static const struct file_operations iwl_dbgfs_monitor_data_ops = { + .read = iwl_dbgfs_monitor_data_read, + .open = iwl_dbgfs_monitor_data_open, + .release = iwl_dbgfs_monitor_data_release, +}; + /* Create the debugfs files and directories */ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) { @@ -2715,12 +2879,23 @@ int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans) DEBUGFS_ADD_FILE(csr, dir, 0200); DEBUGFS_ADD_FILE(fh_reg, dir, 0400); DEBUGFS_ADD_FILE(rfkill, dir, 0600); + DEBUGFS_ADD_FILE(monitor_data, dir, 0400); return 0; err: IWL_ERR(trans, "failed to create the trans debugfs entry\n"); return -ENOMEM; } + +static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct cont_rec *data = &trans_pcie->fw_mon_data; + + mutex_lock(&data->mutex); + data->state = IWL_FW_MON_DBGFS_STATE_DISABLED; + mutex_unlock(&data->mutex); +} #endif /*CONFIG_IWLWIFI_DEBUGFS */ static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd) @@ -2854,6 +3029,34 @@ iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans, return monitor_len; } +static void +iwl_trans_pcie_dump_pointers(struct iwl_trans *trans, + struct iwl_fw_error_dump_fw_mon *fw_mon_data) +{ + u32 base, write_ptr, wrap_cnt; + + /* If there was a dest TLV - use the values from there */ + if (trans->ini_valid) { + base = MON_BUFF_BASE_ADDR_VER2; + write_ptr = MON_BUFF_WRPTR_VER2; + wrap_cnt = MON_BUFF_CYCLE_CNT_VER2; + } else if (trans->dbg_dest_tlv) { + write_ptr = le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); + wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); + base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); + } else { + base = MON_BUFF_BASE_ADDR; + write_ptr = MON_BUFF_WRPTR; + wrap_cnt = MON_BUFF_CYCLE_CNT; + } + fw_mon_data->fw_mon_wr_ptr = + cpu_to_le32(iwl_read_prph(trans, write_ptr)); + fw_mon_data->fw_mon_cycle_cnt = + cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); + fw_mon_data->fw_mon_base_ptr = + cpu_to_le32(iwl_read_prph(trans, base)); +} + static u32 
iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, struct iwl_fw_error_dump_data **data, @@ -2863,30 +3066,14 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, if ((trans->num_blocks && trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) || - trans->dbg_dest_tlv) { + (trans->dbg_dest_tlv && !trans->ini_valid) || + (trans->ini_valid && trans->num_blocks)) { struct iwl_fw_error_dump_fw_mon *fw_mon_data; - u32 base, write_ptr, wrap_cnt; - - /* If there was a dest TLV - use the values from there */ - if (trans->dbg_dest_tlv) { - write_ptr = - le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg); - wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count); - base = le32_to_cpu(trans->dbg_dest_tlv->base_reg); - } else { - base = MON_BUFF_BASE_ADDR; - write_ptr = MON_BUFF_WRPTR; - wrap_cnt = MON_BUFF_CYCLE_CNT; - } (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR); fw_mon_data = (void *)(*data)->data; - fw_mon_data->fw_mon_wr_ptr = - cpu_to_le32(iwl_read_prph(trans, write_ptr)); - fw_mon_data->fw_mon_cycle_cnt = - cpu_to_le32(iwl_read_prph(trans, wrap_cnt)); - fw_mon_data->fw_mon_base_ptr = - cpu_to_le32(iwl_read_prph(trans, base)); + + iwl_trans_pcie_dump_pointers(trans, fw_mon_data); len += sizeof(**data) + sizeof(*fw_mon_data); if (trans->num_blocks) { @@ -2896,6 +3083,7 @@ iwl_trans_pcie_dump_monitor(struct iwl_trans *trans, monitor_len = trans->fw_mon[0].size; } else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) { + u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr); /* * Update pointers to reflect actual values after * shifting @@ -2978,7 +3166,7 @@ static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, int *len) static struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans, - const struct iwl_fw_dbg_trigger_tlv *trigger) + u32 dump_mask) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_fw_error_dump_data *data; @@ -2990,7 +3178,10 @@ static struct iwl_trans_dump_data int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && !trans->cfg->mq_rx_supported && - trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); + dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); + + if (!dump_mask) + return NULL; /* transport dump header */ len = sizeof(*dump_data); @@ -3002,11 +3193,7 @@ static struct iwl_trans_dump_data /* FW monitor */ monitor_len = iwl_trans_get_fw_monitor_len(trans, &len); - if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { - if (!(trans->dbg_dump_mask & - BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))) - return NULL; - + if (dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) { dump_data = vzalloc(len); if (!dump_data) return NULL; @@ -3019,11 +3206,11 @@ static struct iwl_trans_dump_data } /* CSR registers */ - if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += sizeof(*data) + IWL_CSR_TO_DUMP; /* FH registers */ - if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { if (trans->cfg->gen2) len += sizeof(*data) + (FH_MEM_UPPER_BOUND_GEN2 - @@ -3048,8 +3235,7 @@ static struct iwl_trans_dump_data } /* Paged memory for gen2 HW */ - if (trans->cfg->gen2 && - trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) + if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -3062,7 +3248,7 @@ static struct iwl_trans_dump_data len = 0; data = (void *)dump_data->data; 
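/*
 * Note on the refactor below (not in the original): dump selection now
 * comes from the dump_mask argument instead of trans->dbg_dump_mask,
 * so a caller can request, e.g., a monitor-only dump through the trans
 * ops with dump_mask == BIT(IWL_FW_ERROR_DUMP_FW_MONITOR).
 */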
- if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) { + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) { u16 tfd_size = trans_pcie->tfd_size; data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); @@ -3096,16 +3282,15 @@ static struct iwl_trans_dump_data data = iwl_fw_error_next_data(data); } - if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) len += iwl_trans_pcie_dump_csr(trans, &data); - if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ - if (trans->cfg->gen2 && - trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { + if (trans->cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; dma_addr_t addr = @@ -3125,7 +3310,7 @@ static struct iwl_trans_dump_data len += sizeof(*data) + sizeof(*paging) + page_len; } } - if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; @@ -3202,6 +3387,9 @@ static const struct iwl_trans_ops trans_ops_pcie = { .freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer, .block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, +#endif }; static const struct iwl_trans_ops trans_ops_pcie_gen2 = { @@ -3221,6 +3409,9 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .txq_free = iwl_trans_pcie_dyn_txq_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, +#ifdef CONFIG_IWLWIFI_DEBUGFS + .debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup, +#endif }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, @@ -3392,8 +3583,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, #if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == - CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + if (cfg == &iwl22000_2ax_cfg_hr) { + if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { + trans->cfg = &iwl22000_2ax_cfg_hr; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_JF)) { + trans->cfg = &iwl22000_2ax_cfg_jf; + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HRCDB)) { + IWL_ERR(trans, "RF ID HRCDB is not supported\n"); + ret = -EINVAL; + goto out_no_pci; + } else { + IWL_ERR(trans, "Unrecognized RF ID 0x%08x\n", + CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id)); + ret = -EINVAL; + goto out_no_pci; + } + } else if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); @@ -3454,6 +3663,11 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, trans->runtime_pm_mode = IWL_PLAT_PM_MODE_DISABLED; #endif /* CONFIG_IWLWIFI_PCIE_RTPM */ +#ifdef CONFIG_IWLWIFI_DEBUGFS + trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED; + mutex_init(&trans_pcie->fw_mon_data.mutex); +#endif + return trans; out_free_ict: diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c 
b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index e880f69eac26..156ca1b1f621 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -238,7 +238,7 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans, { #ifdef CONFIG_INET struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; + struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload; struct ieee80211_hdr *hdr = (void *)skb->data; unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; unsigned int mss = skb_shinfo(skb)->gso_size; @@ -583,18 +583,6 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { - struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen3->len); - } else { - struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = - (void *)dev_cmd->payload; - - cmd_len = le16_to_cpu(tx_cmd_gen2->len); - } - if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); @@ -632,6 +620,18 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, return -1; } + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen2->len); + } + /* Set up entry for this TFD in Tx byte-count array */ iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len, iwl_pcie_gen2_get_num_tbs(trans, tfd)); @@ -1228,8 +1228,7 @@ int iwl_trans_pcie_txq_alloc_response(struct iwl_trans *trans, /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; txq->write_ptr = wr_ptr; - iwl_write_direct32(trans, HBUS_TARG_WRPTR, - (txq->write_ptr) | (qid << 16)); + IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid); iwl_free_resp(hcmd); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 87b7225fe289..ee990a7a5411 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -1160,10 +1160,11 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, */ iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); } - spin_lock_bh(&txq->lock); if (iwl_queue_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); + + spin_lock_bh(&txq->lock); } if (txq->read_ptr == txq->write_ptr) { @@ -1245,11 +1246,11 @@ void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) if (idx >= trans->cfg->base_params->max_tfd_queue_size || (!iwl_queue_used(txq, idx))) { - IWL_ERR(trans, - "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, idx, - trans->cfg->base_params->max_tfd_queue_size, - txq->write_ptr, txq->read_ptr); + WARN_ONCE(test_bit(txq_id, trans_pcie->queue_used), + "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", + __func__, txq_id, idx, + trans->cfg->base_params->max_tfd_queue_size, + txq->write_ptr, txq->read_ptr); return; } diff --git a/drivers/net/wireless/intersil/hostap/hostap_main.c b/drivers/net/wireless/intersil/hostap/hostap_main.c index 012930d35434..b0e7c0a0617e 100644 --- a/drivers/net/wireless/intersil/hostap/hostap_main.c +++ 
b/drivers/net/wireless/intersil/hostap/hostap_main.c @@ -690,7 +690,7 @@ static int prism2_open(struct net_device *dev) /* Master radio interface is needed for all operation, so open * it automatically when any virtual net_device is opened. */ local->master_dev_auto_open = 1; - dev_open(local->dev); + dev_open(local->dev, NULL); } netif_device_attach(dev); diff --git a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c index 21bb68457cfe..40a8b941ad5c 100644 --- a/drivers/net/wireless/intersil/orinoco/orinoco_usb.c +++ b/drivers/net/wireless/intersil/orinoco/orinoco_usb.c @@ -908,6 +908,7 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv, case EZUSB_CTX_REQ_SUBMITTED: if (!ctx->in_rid) break; + /* fall through */ default: err("%s: Unexpected context state %d", __func__, state); diff --git a/drivers/net/wireless/intersil/prism54/isl_38xx.c b/drivers/net/wireless/intersil/prism54/isl_38xx.c index ce9d4db0d9ca..b0eb58a62c90 100644 --- a/drivers/net/wireless/intersil/prism54/isl_38xx.c +++ b/drivers/net/wireless/intersil/prism54/isl_38xx.c @@ -235,6 +235,7 @@ isl38xx_in_queue(isl38xx_control_block *cb, int queue) /* send queues */ case ISL38XX_CB_TX_MGMTQ: BUG_ON(delta > ISL38XX_CB_MGMT_QSIZE); + /* fall through */ case ISL38XX_CB_TX_DATA_LQ: case ISL38XX_CB_TX_DATA_HQ: diff --git a/drivers/net/wireless/intersil/prism54/isl_ioctl.c b/drivers/net/wireless/intersil/prism54/isl_ioctl.c index 334717b0a2be..3ccf2a4b548c 100644 --- a/drivers/net/wireless/intersil/prism54/isl_ioctl.c +++ b/drivers/net/wireless/intersil/prism54/isl_ioctl.c @@ -1691,6 +1691,7 @@ static int prism54_get_encodeext(struct net_device *ndev, case DOT11_AUTH_BOTH: case DOT11_AUTH_SK: wrqu->encoding.flags |= IW_ENCODE_RESTRICTED; + /* fall through */ case DOT11_AUTH_OS: default: wrqu->encoding.flags |= IW_ENCODE_OPEN; diff --git a/drivers/net/wireless/intersil/prism54/islpci_dev.c b/drivers/net/wireless/intersil/prism54/islpci_dev.c index 325176d4d796..ad6d3a56ae06 100644 --- a/drivers/net/wireless/intersil/prism54/islpci_dev.c +++ b/drivers/net/wireless/intersil/prism54/islpci_dev.c @@ -932,6 +932,7 @@ islpci_set_state(islpci_private *priv, islpci_state_t new_state) switch (new_state) { case PRV_STATE_OFF: priv->state_off++; + /* fall through */ default: priv->state = new_state; break; diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index d1464e3e1be2..3a4b8786f7ea 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -374,6 +374,20 @@ static const struct ieee80211_rate hwsim_rates[] = { { .bitrate = 540 } }; +static const u32 hwsim_ciphers[] = { + WLAN_CIPHER_SUITE_WEP40, + WLAN_CIPHER_SUITE_WEP104, + WLAN_CIPHER_SUITE_TKIP, + WLAN_CIPHER_SUITE_CCMP, + WLAN_CIPHER_SUITE_CCMP_256, + WLAN_CIPHER_SUITE_GCMP, + WLAN_CIPHER_SUITE_GCMP_256, + WLAN_CIPHER_SUITE_AES_CMAC, + WLAN_CIPHER_SUITE_BIP_CMAC_256, + WLAN_CIPHER_SUITE_BIP_GMAC_128, + WLAN_CIPHER_SUITE_BIP_GMAC_256, +}; + #define OUI_QCA 0x001374 #define QCA_NL80211_SUBCMD_TEST 1 enum qca_nl80211_vendor_subcmds { @@ -451,48 +465,6 @@ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { { .vendor_id = OUI_QCA, .subcmd = 1 }, }; -static const struct ieee80211_iface_limit hwsim_if_limits[] = { - { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) }, - { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - 
BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_GO) }, - /* must be last, see hwsim_if_comb */ - { .max = 1, .types = BIT(NL80211_IFTYPE_P2P_DEVICE) } -}; - -static const struct ieee80211_iface_combination hwsim_if_comb[] = { - { - .limits = hwsim_if_limits, - /* remove the last entry which is P2P_DEVICE */ - .n_limits = ARRAY_SIZE(hwsim_if_limits) - 1, - .max_interfaces = 2048, - .num_different_channels = 1, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160), - }, -}; - -static const struct ieee80211_iface_combination hwsim_if_comb_p2p_dev[] = { - { - .limits = hwsim_if_limits, - .n_limits = ARRAY_SIZE(hwsim_if_limits), - .max_interfaces = 2048, - .num_different_channels = 1, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80) | - BIT(NL80211_CHAN_WIDTH_160), - }, -}; - static spinlock_t hwsim_radio_lock; static LIST_HEAD(hwsim_radios); static struct rhashtable hwsim_radios_rht; @@ -515,6 +487,10 @@ struct mac80211_hwsim_data { struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; struct ieee80211_iface_combination if_combination; + struct ieee80211_iface_limit if_limits[3]; + int n_if_limits; + + u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address addresses[2]; int channels, idx; @@ -642,6 +618,8 @@ static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, [HWSIM_ATTR_PERM_ADDR] = { .type = NLA_UNSPEC, .len = ETH_ALEN }, + [HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 }, + [HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY }, }; static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, @@ -2414,6 +2392,9 @@ struct hwsim_new_radio_params { const char *hwname; bool no_vif; const u8 *perm_addr; + u32 iftypes; + u32 *ciphers; + u8 n_ciphers; }; static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, @@ -2630,6 +2611,27 @@ static void mac80211_hswim_he_capab(struct ieee80211_supported_band *sband) sband->n_iftype_data = 1; } +#ifdef CONFIG_MAC80211_MESH +#define HWSIM_MESH_BIT BIT(NL80211_IFTYPE_MESH_POINT) +#else +#define HWSIM_MESH_BIT 0 +#endif + +#define HWSIM_DEFAULT_IF_LIMIT \ + (BIT(NL80211_IFTYPE_STATION) | \ + BIT(NL80211_IFTYPE_P2P_CLIENT) | \ + BIT(NL80211_IFTYPE_AP) | \ + BIT(NL80211_IFTYPE_P2P_GO) | \ + HWSIM_MESH_BIT) + +#define HWSIM_IFTYPE_SUPPORT_MASK \ + (BIT(NL80211_IFTYPE_STATION) | \ + BIT(NL80211_IFTYPE_AP) | \ + BIT(NL80211_IFTYPE_P2P_CLIENT) | \ + BIT(NL80211_IFTYPE_P2P_GO) | \ + BIT(NL80211_IFTYPE_ADHOC) | \ + BIT(NL80211_IFTYPE_MESH_POINT)) + static int mac80211_hwsim_new_radio(struct genl_info *info, struct hwsim_new_radio_params *param) { @@ -2641,6 +2643,7 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, const struct ieee80211_ops *ops = &mac80211_hwsim_ops; struct net *net; int idx; + int n_limits = 0; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) return -EINVAL; @@ -2716,26 +2719,60 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, if (info) data->portid = info->snd_portid; + /* setup interface limits, only on interface types we support */ + if (param->iftypes & BIT(NL80211_IFTYPE_ADHOC)) { + data->if_limits[n_limits].max = 1; + data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_ADHOC); + n_limits++; + } + + 
if (param->iftypes & HWSIM_DEFAULT_IF_LIMIT) { + data->if_limits[n_limits].max = 2048; + /* + * For this case, we may only support a subset of + * HWSIM_DEFAULT_IF_LIMIT, therefore we only want to add the + * bits that both param->iftype & HWSIM_DEFAULT_IF_LIMIT have. + */ + data->if_limits[n_limits].types = + HWSIM_DEFAULT_IF_LIMIT & param->iftypes; + n_limits++; + } + + if (param->iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) { + data->if_limits[n_limits].max = 1; + data->if_limits[n_limits].types = + BIT(NL80211_IFTYPE_P2P_DEVICE); + n_limits++; + } + if (data->use_chanctx) { hw->wiphy->max_scan_ssids = 255; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->max_remain_on_channel_duration = 1000; - hw->wiphy->iface_combinations = &data->if_combination; - if (param->p2p_device) - data->if_combination = hwsim_if_comb_p2p_dev[0]; - else - data->if_combination = hwsim_if_comb[0]; - hw->wiphy->n_iface_combinations = 1; - /* For channels > 1 DFS is not allowed */ data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; - } else if (param->p2p_device) { - hw->wiphy->iface_combinations = hwsim_if_comb_p2p_dev; - hw->wiphy->n_iface_combinations = - ARRAY_SIZE(hwsim_if_comb_p2p_dev); } else { - hw->wiphy->iface_combinations = hwsim_if_comb; - hw->wiphy->n_iface_combinations = ARRAY_SIZE(hwsim_if_comb); + data->if_combination.num_different_channels = 1; + data->if_combination.radar_detect_widths = + BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80) | + BIT(NL80211_CHAN_WIDTH_160); + } + + data->if_combination.n_limits = n_limits; + data->if_combination.max_interfaces = 2048; + data->if_combination.limits = data->if_limits; + + hw->wiphy->iface_combinations = &data->if_combination; + hw->wiphy->n_iface_combinations = 1; + + if (param->ciphers) { + memcpy(data->ciphers, param->ciphers, + param->n_ciphers * sizeof(u32)); + hw->wiphy->cipher_suites = data->ciphers; + hw->wiphy->n_cipher_suites = param->n_ciphers; } INIT_DELAYED_WORK(&data->roc_start, hw_roc_start); @@ -2744,15 +2781,6 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, hw->queues = 5; hw->offchannel_tx_hw_queue = 4; - hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | - BIT(NL80211_IFTYPE_P2P_CLIENT) | - BIT(NL80211_IFTYPE_P2P_GO) | - BIT(NL80211_IFTYPE_ADHOC) | - BIT(NL80211_IFTYPE_MESH_POINT); - - if (param->p2p_device) - hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_P2P_DEVICE); ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, CHANCTX_STA_CSA); @@ -2778,6 +2806,8 @@ static int mac80211_hwsim_new_radio(struct genl_info *info, NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + hw->wiphy->interface_modes = param->iftypes; + /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); hw->sta_data_size = sizeof(struct hwsim_sta_priv); @@ -3293,6 +3323,29 @@ static int hwsim_register_received_nl(struct sk_buff *skb_2, return 0; } +/* ensures ciphers only include ciphers listed in 'hwsim_ciphers' array */ +static bool hwsim_known_ciphers(const u32 *ciphers, int n_ciphers) +{ + int i; + + for (i = 0; i < n_ciphers; i++) { + int j; + int found = 0; + + for (j = 0; j < ARRAY_SIZE(hwsim_ciphers); j++) { + if (ciphers[i] == hwsim_ciphers[j]) { + found = 1; + break; + } + } + + if (!found) + return false; + } + + return true; +} + static int 
hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; @@ -3321,15 +3374,6 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; - if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { - hwname = kasprintf(GFP_KERNEL, "%.*s", - nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), - (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); - if (!hwname) - return -ENOMEM; - param.hwname = hwname; - } - if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; else @@ -3342,10 +3386,8 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); - if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) { - kfree(hwname); + if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) return -EINVAL; - } idx = array_index_nospec(idx, ARRAY_SIZE(hwsim_world_regdom_custom)); @@ -3358,14 +3400,72 @@ static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); NL_SET_BAD_ATTR(info->extack, info->attrs[HWSIM_ATTR_PERM_ADDR]); - kfree(hwname); return -EINVAL; } - param.perm_addr = nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]); } + if (info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]) { + param.iftypes = + nla_get_u32(info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]); + + if (param.iftypes & ~HWSIM_IFTYPE_SUPPORT_MASK) { + NL_SET_ERR_MSG_ATTR(info->extack, + info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT], + "cannot support more iftypes than kernel"); + return -EINVAL; + } + } else { + param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; + } + + /* ensure both flag and iftype support is honored */ + if (param.p2p_device || + param.iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) { + param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); + param.p2p_device = true; + } + + if (info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]) { + u32 len = nla_len(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]); + + param.ciphers = + nla_data(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]); + + if (len % sizeof(u32)) { + NL_SET_ERR_MSG_ATTR(info->extack, + info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], + "bad cipher list length"); + return -EINVAL; + } + + param.n_ciphers = len / sizeof(u32); + + if (param.n_ciphers > ARRAY_SIZE(hwsim_ciphers)) { + NL_SET_ERR_MSG_ATTR(info->extack, + info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], + "too many ciphers specified"); + return -EINVAL; + } + + if (!hwsim_known_ciphers(param.ciphers, param.n_ciphers)) { + NL_SET_ERR_MSG_ATTR(info->extack, + info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], + "unsupported ciphers specified"); + return -EINVAL; + } + } + + if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { + hwname = kasprintf(GFP_KERNEL, "%.*s", + nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), + (char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME])); + if (!hwname) + return -ENOMEM; + param.hwname = hwname; + } + ret = mac80211_hwsim_new_radio(info, &param); kfree(hwname); return ret; @@ -3785,6 +3885,7 @@ static int __init init_mac80211_hwsim(void) param.p2p_device = support_p2p_device; param.use_chanctx = channels > 1; + param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; err = mac80211_hwsim_new_radio(NULL, &param); if (err < 0) diff --git a/drivers/net/wireless/mac80211_hwsim.h b/drivers/net/wireless/mac80211_hwsim.h index 0fe3199f8c72..a1ef8457fad4 100644 --- a/drivers/net/wireless/mac80211_hwsim.h +++ b/drivers/net/wireless/mac80211_hwsim.h @@ -132,6 +132,8 @@ enum { * @HWSIM_ATTR_TX_INFO_FLAGS: additional 
flags for corresponding * rates of %HWSIM_ATTR_TX_INFO * @HWSIM_ATTR_PERM_ADDR: permanent mac address of new radio + * @HWSIM_ATTR_IFTYPE_SUPPORT: u32 attribute of supported interface types bits + * @HWSIM_ATTR_CIPHER_SUPPORT: u32 array of supported cipher types * @__HWSIM_ATTR_MAX: enum limit */ @@ -160,6 +162,8 @@ enum { HWSIM_ATTR_PAD, HWSIM_ATTR_TX_INFO_FLAGS, HWSIM_ATTR_PERM_ADDR, + HWSIM_ATTR_IFTYPE_SUPPORT, + HWSIM_ATTR_CIPHER_SUPPORT, __HWSIM_ATTR_MAX, }; #define HWSIM_ATTR_MAX (__HWSIM_ATTR_MAX - 1) diff --git a/drivers/net/wireless/marvell/libertas/if_spi.c b/drivers/net/wireless/marvell/libertas/if_spi.c index 504d6e096476..7c3224b83ef7 100644 --- a/drivers/net/wireless/marvell/libertas/if_spi.c +++ b/drivers/net/wireless/marvell/libertas/if_spi.c @@ -796,15 +796,13 @@ static void if_spi_h2c(struct if_spi_card *card, { struct lbs_private *priv = card->priv; int err = 0; - u16 int_type, port_reg; + u16 port_reg; switch (type) { case MVMS_DAT: - int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER; port_reg = IF_SPI_DATA_RDWRPORT_REG; break; case MVMS_CMD: - int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER; port_reg = IF_SPI_CMD_RDWRPORT_REG; break; default: diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c index adc88433faa8..1467af22e394 100644 --- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c +++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c @@ -1275,27 +1275,27 @@ mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, } static void -mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo, +mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 rateinfo, u8 htinfo, struct rate_info *rate) { struct mwifiex_adapter *adapter = priv->adapter; if (adapter->is_hw_11ac_capable) { /* bit[1-0]: 00=LG 01=HT 10=VHT */ - if (tx_htinfo & BIT(0)) { + if (htinfo & BIT(0)) { /* HT */ - rate->mcs = priv->tx_rate; + rate->mcs = rateinfo; rate->flags |= RATE_INFO_FLAGS_MCS; } - if (tx_htinfo & BIT(1)) { + if (htinfo & BIT(1)) { /* VHT */ - rate->mcs = priv->tx_rate & 0x0F; + rate->mcs = rateinfo & 0x0F; rate->flags |= RATE_INFO_FLAGS_VHT_MCS; } - if (tx_htinfo & (BIT(1) | BIT(0))) { + if (htinfo & (BIT(1) | BIT(0))) { /* HT or VHT */ - switch (tx_htinfo & (BIT(3) | BIT(2))) { + switch (htinfo & (BIT(3) | BIT(2))) { case 0: rate->bw = RATE_INFO_BW_20; break; @@ -1310,29 +1310,51 @@ mwifiex_parse_htinfo(struct mwifiex_private *priv, u8 tx_htinfo, break; } - if (tx_htinfo & BIT(4)) + if (htinfo & BIT(4)) rate->flags |= RATE_INFO_FLAGS_SHORT_GI; - if ((priv->tx_rate >> 4) == 1) + if ((rateinfo >> 4) == 1) rate->nss = 2; else rate->nss = 1; } } else { /* - * Bit 0 in tx_htinfo indicates that current Tx rate - * is 11n rate. Valid MCS index values for us are 0 to 15. + * Bit 0 in htinfo indicates that current rate is 11n. Valid + * MCS index values for us are 0 to 15. */ - if ((tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) { - rate->mcs = priv->tx_rate; + if ((htinfo & BIT(0)) && (rateinfo < 16)) { + rate->mcs = rateinfo; rate->flags |= RATE_INFO_FLAGS_MCS; rate->bw = RATE_INFO_BW_20; - if (tx_htinfo & BIT(1)) + if (htinfo & BIT(1)) rate->bw = RATE_INFO_BW_40; - if (tx_htinfo & BIT(2)) + if (htinfo & BIT(2)) rate->flags |= RATE_INFO_FLAGS_SHORT_GI; } } + + /* Decode legacy rates for non-HT. */ + if (!(htinfo & (BIT(0) | BIT(1)))) { + /* Bitrates in multiples of 100kb/s. 
*/ + static const int legacy_rates[] = { + [0] = 10, + [1] = 20, + [2] = 55, + [3] = 110, + [4] = 60, /* MWIFIEX_RATE_INDEX_OFDM0 */ + [5] = 60, + [6] = 90, + [7] = 120, + [8] = 180, + [9] = 240, + [10] = 360, + [11] = 480, + [12] = 540, + }; + if (rateinfo < ARRAY_SIZE(legacy_rates)) + rate->legacy = legacy_rates[rateinfo]; + } } /* @@ -1375,7 +1397,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, sinfo->tx_packets = node->stats.tx_packets; sinfo->tx_failed = node->stats.tx_failed; - mwifiex_parse_htinfo(priv, node->stats.last_tx_htinfo, + mwifiex_parse_htinfo(priv, priv->tx_rate, + node->stats.last_tx_htinfo, &sinfo->txrate); sinfo->txrate.legacy = node->stats.last_tx_rate * 5; @@ -1401,7 +1424,8 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, HostCmd_ACT_GEN_GET, DTIM_PERIOD_I, &priv->dtim_period, true); - mwifiex_parse_htinfo(priv, priv->tx_htinfo, &sinfo->txrate); + mwifiex_parse_htinfo(priv, priv->tx_rate, priv->tx_htinfo, + &sinfo->txrate); sinfo->signal_avg = priv->bcn_rssi_avg; sinfo->rx_bytes = priv->stats.rx_bytes; @@ -1412,6 +1436,10 @@ mwifiex_dump_station_info(struct mwifiex_private *priv, /* bit rate is in 500 kb/s units. Convert it to 100kb/s units */ sinfo->txrate.legacy = rate * 5; + sinfo->filled |= BIT(NL80211_STA_INFO_RX_BITRATE); + mwifiex_parse_htinfo(priv, priv->rxpd_rate, priv->rxpd_htinfo, + &sinfo->rxrate); + if (priv->bss_mode == NL80211_IFTYPE_STATION) { sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BSS_PARAM); sinfo->bss_param.flags = 0; diff --git a/drivers/net/wireless/marvell/mwifiex/debugfs.c b/drivers/net/wireless/marvell/mwifiex/debugfs.c index cce70252fd96..cbe4493b3266 100644 --- a/drivers/net/wireless/marvell/mwifiex/debugfs.c +++ b/drivers/net/wireless/marvell/mwifiex/debugfs.c @@ -273,15 +273,13 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, "total samples = %d\n", atomic_read(&phist_data->num_samples)); - p += sprintf(p, "rx rates (in Mbps): 0=1M 1=2M"); - p += sprintf(p, "2=5.5M 3=11M 4=6M 5=9M 6=12M\n"); - p += sprintf(p, "7=18M 8=24M 9=36M 10=48M 11=54M"); - p += sprintf(p, "12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n"); + p += sprintf(p, + "rx rates (in Mbps): 0=1M 1=2M 2=5.5M 3=11M 4=6M 5=9M 6=12M\n" + "7=18M 8=24M 9=36M 10=48M 11=54M 12-27=MCS0-15(BW20) 28-43=MCS0-15(BW40)\n"); if (ISSUPP_11ACENABLED(priv->adapter->fw_cap_info)) { - p += sprintf(p, "44-53=MCS0-9(VHT:BW20)"); - p += sprintf(p, "54-63=MCS0-9(VHT:BW40)"); - p += sprintf(p, "64-73=MCS0-9(VHT:BW80)\n\n"); + p += sprintf(p, + "44-53=MCS0-9(VHT:BW20) 54-63=MCS0-9(VHT:BW40) 64-73=MCS0-9(VHT:BW80)\n\n"); } else { p += sprintf(p, "\n"); } @@ -310,7 +308,7 @@ mwifiex_histogram_read(struct file *file, char __user *ubuf, for (i = 0; i < MWIFIEX_MAX_NOISE_FLR; i++) { value = atomic_read(&phist_data->noise_flr[i]); if (value) - p += sprintf(p, "noise_flr[-%02ddBm] = %d\n", + p += sprintf(p, "noise_flr[%02ddBm] = %d\n", (int)(i-128), value); } for (i = 0; i < MWIFIEX_MAX_SIG_STRENGTH; i++) { diff --git a/drivers/net/wireless/marvell/mwifiex/ie.c b/drivers/net/wireless/marvell/mwifiex/ie.c index 75cbd609d606..6845eb57b39a 100644 --- a/drivers/net/wireless/marvell/mwifiex/ie.c +++ b/drivers/net/wireless/marvell/mwifiex/ie.c @@ -363,6 +363,7 @@ static int mwifiex_uap_parse_tail_ies(struct mwifiex_private *priv, (const u8 *)hdr, hdr->len + sizeof(struct ieee_types_header))) break; + /* fall through */ default: memcpy(gen_ie->ie_buffer + ie_len, hdr, hdr->len + sizeof(struct ieee_types_header)); diff --git a/drivers/net/wireless/marvell/mwifiex/scan.c 
b/drivers/net/wireless/marvell/mwifiex/scan.c index 8e483b0bc3b1..935778ec9a1b 100644 --- a/drivers/net/wireless/marvell/mwifiex/scan.c +++ b/drivers/net/wireless/marvell/mwifiex/scan.c @@ -1882,15 +1882,17 @@ mwifiex_parse_single_response_buf(struct mwifiex_private *priv, u8 **bss_info, ETH_ALEN)) mwifiex_update_curr_bss_params(priv, bss); - cfg80211_put_bss(priv->wdev.wiphy, bss); - } - if ((chan->flags & IEEE80211_CHAN_RADAR) || - (chan->flags & IEEE80211_CHAN_NO_IR)) { - mwifiex_dbg(adapter, INFO, - "radar or passive channel %d\n", - channel); - mwifiex_save_hidden_ssid_channels(priv, bss); + if ((chan->flags & IEEE80211_CHAN_RADAR) || + (chan->flags & IEEE80211_CHAN_NO_IR)) { + mwifiex_dbg(adapter, INFO, + "radar or passive channel %d\n", + channel); + mwifiex_save_hidden_ssid_channels(priv, + bss); + } + + cfg80211_put_bss(priv->wdev.wiphy, bss); } } } else { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_rx.c b/drivers/net/wireless/marvell/mwifiex/sta_rx.c index 00fcbda09349..fb28a5c7f441 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_rx.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_rx.c @@ -152,14 +152,17 @@ int mwifiex_process_rx_packet(struct mwifiex_private *priv, mwifiex_process_tdls_action_frame(priv, offset, rx_pkt_len); } - priv->rxpd_rate = local_rx_pd->rx_rate; - - priv->rxpd_htinfo = local_rx_pd->ht_info; + /* Only stash RX bitrate for unicast packets. */ + if (likely(!is_multicast_ether_addr(rx_pkt_hdr->eth803_hdr.h_dest))) { + priv->rxpd_rate = local_rx_pd->rx_rate; + priv->rxpd_htinfo = local_rx_pd->ht_info; + } if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA || GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_UAP) { - adj_rx_rate = mwifiex_adjust_data_rate(priv, priv->rxpd_rate, - priv->rxpd_htinfo); + adj_rx_rate = mwifiex_adjust_data_rate(priv, + local_rx_pd->rx_rate, + local_rx_pd->ht_info); mwifiex_hist_data_add(priv, adj_rx_rate, local_rx_pd->snr, local_rx_pd->nf); } diff --git a/drivers/net/wireless/mediatek/mt76/Makefile b/drivers/net/wireless/mediatek/mt76/Makefile index 9b8d7488c545..1a45cb30f39f 100644 --- a/drivers/net/wireless/mediatek/mt76/Makefile +++ b/drivers/net/wireless/mediatek/mt76/Makefile @@ -14,7 +14,8 @@ CFLAGS_mt76x02_trace.o := -I$(src) mt76x02-lib-y := mt76x02_util.o mt76x02_mac.o mt76x02_mcu.o \ mt76x02_eeprom.o mt76x02_phy.o mt76x02_mmio.o \ - mt76x02_txrx.o mt76x02_trace.o + mt76x02_txrx.o mt76x02_trace.o mt76x02_debugfs.o \ + mt76x02_dfs.o mt76x02-usb-y := mt76x02_usb_mcu.o mt76x02_usb_core.o diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c index f7fbd7016403..e2ba26378575 100644 --- a/drivers/net/wireless/mediatek/mt76/dma.c +++ b/drivers/net/wireless/mediatek/mt76/dma.c @@ -157,17 +157,20 @@ mt76_dma_tx_cleanup(struct mt76_dev *dev, enum mt76_txq_id qid, bool flush) if (entry.schedule) q->swq_queued--; - if (entry.skb) + q->tail = (q->tail + 1) % q->ndesc; + q->queued--; + + if (entry.skb) { + spin_unlock_bh(&q->lock); dev->drv->tx_complete_skb(dev, q, &entry, flush); + spin_lock_bh(&q->lock); + } if (entry.txwi) { mt76_put_txwi(dev, entry.txwi); - wake = true; + wake = !flush; } - q->tail = (q->tail + 1) % q->ndesc; - q->queued--; - if (!flush && q->tail == last) last = ioread32(&q->regs->dma_idx); } @@ -258,6 +261,7 @@ int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, return -ENOMEM; } + skb->prev = skb->next = NULL; dma_sync_single_for_cpu(dev->dev, t->dma_addr, sizeof(t->txwi), DMA_TO_DEVICE); ret = dev->drv->tx_prepare_skb(dev, &t->txwi, skb, q, wcid, sta, 
diff --git a/drivers/net/wireless/mediatek/mt76/mac80211.c b/drivers/net/wireless/mediatek/mt76/mac80211.c index 7d219ff2d480..7b926dfa6b97 100644 --- a/drivers/net/wireless/mediatek/mt76/mac80211.c +++ b/drivers/net/wireless/mediatek/mt76/mac80211.c @@ -285,6 +285,7 @@ mt76_alloc_device(unsigned int size, const struct ieee80211_ops *ops) spin_lock_init(&dev->cc_lock); mutex_init(&dev->mutex); init_waitqueue_head(&dev->tx_wait); + skb_queue_head_init(&dev->status_list); return dev; } @@ -326,6 +327,7 @@ int mt76_register_device(struct mt76_dev *dev, bool vht, ieee80211_hw_set(hw, TX_FRAG_LIST); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, AP_LINK_PS); + ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); wiphy->flags |= WIPHY_FLAG_IBSS_RSN; @@ -359,6 +361,7 @@ void mt76_unregister_device(struct mt76_dev *dev) { struct ieee80211_hw *hw = dev->hw; + mt76_tx_status_check(dev, NULL, true); ieee80211_unregister_hw(hw); mt76_tx_free(dev); } @@ -629,3 +632,80 @@ void mt76_rx_poll_complete(struct mt76_dev *dev, enum mt76_rxq_id q, mt76_rx_complete(dev, &frames, napi); } EXPORT_SYMBOL_GPL(mt76_rx_poll_complete); + +static int +mt76_sta_add(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + int ret; + int i; + + mutex_lock(&dev->mutex); + + ret = dev->drv->sta_add(dev, vif, sta); + if (ret) + goto out; + + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { + struct mt76_txq *mtxq; + + if (!sta->txq[i]) + continue; + + mtxq = (struct mt76_txq *)sta->txq[i]->drv_priv; + mtxq->wcid = wcid; + + mt76_txq_init(dev, sta->txq[i]); + } + + rcu_assign_pointer(dev->wcid[wcid->idx], wcid); + +out: + mutex_unlock(&dev->mutex); + + return ret; +} + +static void +mt76_sta_remove(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) +{ + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + int idx = wcid->idx; + int i; + + rcu_assign_pointer(dev->wcid[idx], NULL); + synchronize_rcu(); + + mutex_lock(&dev->mutex); + + if (dev->drv->sta_remove) + dev->drv->sta_remove(dev, vif, sta); + + mt76_tx_status_check(dev, wcid, true); + for (i = 0; i < ARRAY_SIZE(sta->txq); i++) + mt76_txq_remove(dev, sta->txq[i]); + mt76_wcid_free(dev->wcid_mask, idx); + + mutex_unlock(&dev->mutex); +} + +int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state) +{ + struct mt76_dev *dev = hw->priv; + + if (old_state == IEEE80211_STA_NOTEXIST && + new_state == IEEE80211_STA_NONE) + return mt76_sta_add(dev, vif, sta); + + if (old_state == IEEE80211_STA_NONE && + new_state == IEEE80211_STA_NOTEXIST) + mt76_sta_remove(dev, vif, sta); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76_sta_state); diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h index 3bfa7f5e3513..5cd508a68609 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76.h +++ b/drivers/net/wireless/mediatek/mt76/mt76.h @@ -135,9 +135,8 @@ struct mt76_queue { }; struct mt76_mcu_ops { - struct sk_buff *(*mcu_msg_alloc)(const void *data, int len); - int (*mcu_send_msg)(struct mt76_dev *dev, struct sk_buff *skb, - int cmd, bool wait_resp); + int (*mcu_send_msg)(struct mt76_dev *dev, int cmd, const void *data, + int len, bool wait_resp); int (*mcu_wr_rp)(struct mt76_dev *dev, u32 base, const struct mt76_reg_pair *rp, int len); int (*mcu_rd_rp)(struct mt76_dev *dev, u32 base, @@ -195,6 +194,8 @@ struct mt76_wcid { 
u8 tx_rate_nss; s8 max_txpwr_adj; bool sw_iv; + + u8 packet_id; }; struct mt76_txq { @@ -233,6 +234,22 @@ struct mt76_rx_tid { struct sk_buff *reorder_buf[]; }; +#define MT_TX_CB_DMA_DONE BIT(0) +#define MT_TX_CB_TXS_DONE BIT(1) +#define MT_TX_CB_TXS_FAILED BIT(2) + +#define MT_PACKET_ID_MASK GENMASK(7, 0) +#define MT_PACKET_ID_NO_ACK MT_PACKET_ID_MASK + +#define MT_TX_STATUS_SKB_TIMEOUT HZ + +struct mt76_tx_cb { + unsigned long jiffies; + u8 wcid; + u8 pktid; + u8 flags; +}; + enum { MT76_STATE_INITIALIZED, MT76_STATE_RUNNING, @@ -271,6 +288,12 @@ struct mt76_driver_ops { void (*sta_ps)(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); + + int (*sta_add)(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); + + void (*sta_remove)(struct mt76_dev *dev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); }; struct mt76_channel_state { @@ -400,6 +423,7 @@ struct mt76_dev { const struct mt76_queue_ops *queue_ops; wait_queue_head_t tx_wait; + struct sk_buff_head status_list; unsigned long wcid_mask[MT76_N_WCIDS / BITS_PER_LONG]; @@ -484,7 +508,6 @@ struct mt76_rx_status { #define mt76_wr_rp(dev, ...) (dev)->mt76.bus->wr_rp(&((dev)->mt76), __VA_ARGS__) #define mt76_rd_rp(dev, ...) (dev)->mt76.bus->rd_rp(&((dev)->mt76), __VA_ARGS__) -#define mt76_mcu_msg_alloc(dev, ...) (dev)->mt76.mcu_ops->mcu_msg_alloc(__VA_ARGS__) #define mt76_mcu_send_msg(dev, ...) (dev)->mt76.mcu_ops->mcu_send_msg(&((dev)->mt76), __VA_ARGS__) #define mt76_set(dev, offset, val) mt76_rmw(dev, offset, 0, val) @@ -594,6 +617,13 @@ wcid_to_sta(struct mt76_wcid *wcid) return container_of(ptr, struct ieee80211_sta, drv_priv); } +static inline struct mt76_tx_cb *mt76_tx_skb_cb(struct sk_buff *skb) +{ + BUILD_BUG_ON(sizeof(struct mt76_tx_cb) > + sizeof(IEEE80211_SKB_CB(skb)->status.status_driver_data)); + return ((void *) IEEE80211_SKB_CB(skb)->status.status_driver_data); +} + int mt76_dma_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, struct sk_buff *skb, struct mt76_wcid *wcid, struct ieee80211_sta *sta); @@ -625,6 +655,26 @@ void mt76_rx_aggr_stop(struct mt76_dev *dev, struct mt76_wcid *wcid, u8 tid); void mt76_wcid_key_setup(struct mt76_dev *dev, struct mt76_wcid *wcid, struct ieee80211_key_conf *key); +void mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) + __acquires(&dev->status_list.lock); +void mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) + __releases(&dev->status_list.lock); + +int mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, + struct sk_buff *skb); +struct sk_buff *mt76_tx_status_skb_get(struct mt76_dev *dev, + struct mt76_wcid *wcid, int pktid, + struct sk_buff_head *list); +void mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, + struct sk_buff_head *list); +void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb); +void mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, + bool flush); +int mt76_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + struct ieee80211_sta *sta, + enum ieee80211_sta_state old_state, + enum ieee80211_sta_state new_state); + struct ieee80211_sta *mt76_rx_convert(struct sk_buff *skb); /* internal */ @@ -668,8 +718,6 @@ int mt76u_vendor_request(struct mt76_dev *dev, u8 req, void *buf, size_t len); void mt76u_single_wr(struct mt76_dev *dev, const u8 req, const u16 offset, const u32 val); -u32 mt76u_rr(struct mt76_dev *dev, u32 addr); -void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val); int mt76u_init(struct mt76_dev 
*dev, struct usb_interface *intf); void mt76u_deinit(struct mt76_dev *dev); int mt76u_buf_alloc(struct mt76_dev *dev, struct mt76u_buf *buf, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile index 20672978dceb..aa22ba954716 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/Makefile @@ -2,11 +2,9 @@ obj-$(CONFIG_MT76x0U) += mt76x0u.o obj-$(CONFIG_MT76x0E) += mt76x0e.o obj-$(CONFIG_MT76x0_COMMON) += mt76x0-common.o -mt76x0-common-y := \ - init.o main.o trace.o eeprom.o phy.o \ - mac.o debugfs.o +mt76x0-common-y := init.o main.o eeprom.o phy.o + mt76x0u-y := usb.o usb_mcu.o mt76x0e-y := pci.o pci_mcu.o # ccflags-y := -DDEBUG -CFLAGS_trace.o := -I$(src) diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c deleted file mode 100644 index 3224e5b1a1e5..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/debugfs.c +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/debugfs.h> - -#include "mt76x0.h" -#include "eeprom.h" - -static int -mt76x0_ampdu_stat_read(struct seq_file *file, void *data) -{ - struct mt76x02_dev *dev = file->private; - int i, j; - -#define stat_printf(grp, off, name) \ - seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off]) - - stat_printf(rx_stat, 0, rx_crc_err); - stat_printf(rx_stat, 1, rx_phy_err); - stat_printf(rx_stat, 2, rx_false_cca); - stat_printf(rx_stat, 3, rx_plcp_err); - stat_printf(rx_stat, 4, rx_fifo_overflow); - stat_printf(rx_stat, 5, rx_duplicate); - - stat_printf(tx_stat, 0, tx_fail_cnt); - stat_printf(tx_stat, 1, tx_bcn_cnt); - stat_printf(tx_stat, 2, tx_success); - stat_printf(tx_stat, 3, tx_retransmit); - stat_printf(tx_stat, 4, tx_zero_len); - stat_printf(tx_stat, 5, tx_underflow); - - stat_printf(aggr_stat, 0, non_aggr_tx); - stat_printf(aggr_stat, 1, aggr_tx); - - stat_printf(zero_len_del, 0, tx_zero_len_del); - stat_printf(zero_len_del, 1, rx_zero_len_del); -#undef stat_printf - - seq_puts(file, "Aggregations stats:\n"); - for (i = 0; i < 4; i++) { - for (j = 0; j < 8; j++) - seq_printf(file, "%08llx ", - dev->stats.aggr_n[i * 8 + j]); - seq_putc(file, '\n'); - } - - seq_printf(file, "recent average AMPDU len: %d\n", - atomic_read(&dev->avg_ampdu_len)); - - return 0; -} - -static int -mt76x0_ampdu_stat_open(struct inode *inode, struct file *f) -{ - return single_open(f, mt76x0_ampdu_stat_read, inode->i_private); -} - -static const struct file_operations fops_ampdu_stat = { - .open = mt76x0_ampdu_stat_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; - -void mt76x0_init_debugfs(struct mt76x02_dev *dev) -{ - struct dentry *dir; - - dir = mt76_register_debugfs(&dev->mt76); - if (!dir) - return; - - debugfs_create_file("ampdu_stat", S_IRUSR, dir, dev, &fops_ampdu_stat); -} diff --git 
a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c index ab4fd6e0f23a..497e762978cc 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/eeprom.c @@ -135,9 +135,6 @@ static s8 mt76x0_get_delta(struct mt76x02_dev *dev) struct cfg80211_chan_def *chandef = &dev->mt76.chandef; u8 val; - if (mt76x0_tssi_enabled(dev)) - return 0; - if (chandef->width == NL80211_CHAN_WIDTH_80) { val = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER) >> 8; } else if (chandef->width == NL80211_CHAN_WIDTH_40) { @@ -160,8 +157,8 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_2ghz = chan->band == NL80211_BAND_2GHZ; struct mt76_rate_power *t = &dev->mt76.rate_power; - s8 delta = mt76x0_get_delta(dev); u16 val, addr; + s8 delta; memset(t, 0, sizeof(*t)); @@ -211,6 +208,7 @@ void mt76x0_get_tx_power_per_rate(struct mt76x02_dev *dev) t->vht[7] = s6_to_s8(val); t->vht[8] = s6_to_s8(val >> 8); + delta = mt76x0_tssi_enabled(dev) ? 0 : mt76x0_get_delta(dev); mt76x02_add_rate_power_offset(t, delta); } @@ -233,6 +231,20 @@ void mt76x0_get_power_info(struct mt76x02_dev *dev, u8 *info) u16 data; int i; + if (mt76x0_tssi_enabled(dev)) { + s8 target_power; + + if (chan->band == NL80211_BAND_5GHZ) + data = mt76x02_eeprom_get(dev, MT_EE_5G_TARGET_POWER); + else + data = mt76x02_eeprom_get(dev, MT_EE_2G_TARGET_POWER); + target_power = (data & 0xff) - dev->mt76.rate_power.ofdm[7]; + info[0] = target_power + mt76x0_get_delta(dev); + info[1] = 0; + + return; + } + for (i = 0; i < ARRAY_SIZE(chan_map); i++) { if (chan_map[i].chan <= chan->hw_value) { offset = chan_map[i].offset; @@ -340,8 +352,6 @@ int mt76x0_eeprom_init(struct mt76x02_dev *dev) mt76x0_set_freq_offset(dev); mt76x0_set_temp_offset(dev); - dev->mt76.chainmask = 0x0101; - return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c index 4a9408801260..87b575fe1c74 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/init.c @@ -16,7 +16,6 @@ #include "mt76x0.h" #include "eeprom.h" -#include "trace.h" #include "mcu.h" #include "initvals.h" @@ -113,7 +112,7 @@ static int mt76x0_init_bbp(struct mt76x02_dev *dev) { int ret, i; - ret = mt76x0_wait_bbp_ready(dev); + ret = mt76x0_phy_wait_bbp_ready(dev); if (ret) return ret; @@ -134,80 +133,28 @@ static int mt76x0_init_bbp(struct mt76x02_dev *dev) static void mt76x0_init_mac_registers(struct mt76x02_dev *dev) { - u32 reg; - RANDOM_WRITE(dev, common_mac_reg_table); - mt76x02_set_beacon_offsets(dev); - /* Enable PBF and MAC clock SYS_CTRL[11:10] = 0x3 */ RANDOM_WRITE(dev, mt76x0_mac_reg_table); /* Release BBP and MAC reset MAC_SYS_CTRL[1:0] = 0x0 */ - reg = mt76_rr(dev, MT_MAC_SYS_CTRL); - reg &= ~0x3; - mt76_wr(dev, MT_MAC_SYS_CTRL, reg); + mt76_clear(dev, MT_MAC_SYS_CTRL, 0x3); /* Set 0x141C[15:12]=0xF */ - reg = mt76_rr(dev, MT_EXT_CCA_CFG); - reg |= 0x0000F000; - mt76_wr(dev, MT_EXT_CCA_CFG, reg); + mt76_set(dev, MT_EXT_CCA_CFG, 0xf000); mt76_clear(dev, MT_FCE_L2_STUFF, MT_FCE_L2_STUFF_WR_MPDU_LEN_EN); /* - TxRing 9 is for Mgmt frame. - TxRing 8 is for In-band command frame. - WMM_RG0_TXQMA: This register setting is for FCE to define the rule of TxRing 9. - WMM_RG1_TXQMA: This register setting is for FCE to define the rule of TxRing 8. 
- */ - reg = mt76_rr(dev, MT_WMM_CTRL); - reg &= ~0x000003FF; - reg |= 0x00000201; - mt76_wr(dev, MT_WMM_CTRL, reg); -} - -static int mt76x0_init_wcid_mem(struct mt76x02_dev *dev) -{ - u32 *vals; - int i; - - vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL); - if (!vals) - return -ENOMEM; - - for (i = 0; i < MT76_N_WCIDS; i++) { - vals[i * 2] = 0xffffffff; - vals[i * 2 + 1] = 0x00ffffff; - } - - mt76_wr_copy(dev, MT_WCID_ADDR_BASE, vals, MT76_N_WCIDS * 2); - kfree(vals); - return 0; -} - -static void mt76x0_init_key_mem(struct mt76x02_dev *dev) -{ - u32 vals[4] = {}; - - mt76_wr_copy(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals)); -} - -static int mt76x0_init_wcid_attr_mem(struct mt76x02_dev *dev) -{ - u32 *vals; - int i; - - vals = kmalloc(sizeof(*vals) * MT76_N_WCIDS * 2, GFP_KERNEL); - if (!vals) - return -ENOMEM; - - for (i = 0; i < MT76_N_WCIDS * 2; i++) - vals[i] = 1; - - mt76_wr_copy(dev, MT_WCID_ATTR_BASE, vals, MT76_N_WCIDS * 2); - kfree(vals); - return 0; + * tx_ring 9 is for mgmt frame + * tx_ring 8 is for in-band command frame. + * WMM_RG0_TXQMA: this register setting is for FCE to + * define the rule of tx_ring 9 + * WMM_RG1_TXQMA: this register setting is for FCE to + * define the rule of tx_ring 8 + */ + mt76_rmw(dev, MT_WMM_CTRL, 0x3ff, 0x201); } static void mt76x0_reset_counters(struct mt76x02_dev *dev) @@ -270,7 +217,7 @@ EXPORT_SYMBOL_GPL(mt76x0_mac_stop); int mt76x0_init_hardware(struct mt76x02_dev *dev) { - int ret; + int ret, i, k; if (!mt76x02_wait_for_wpdma(&dev->mt76, 1000)) return -EIO; @@ -280,7 +227,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev) return -ETIMEDOUT; mt76x0_reset_csr_bbp(dev); - ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false); + ret = mt76x02_mcu_function_select(dev, Q_SELECT, 1); if (ret) return ret; @@ -295,20 +242,12 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev) dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); - ret = mt76x0_init_wcid_mem(dev); - if (ret) - return ret; + for (i = 0; i < 16; i++) + for (k = 0; k < 4; k++) + mt76x02_mac_shared_key_setup(dev, i, k, NULL); - mt76x0_init_key_mem(dev); - - ret = mt76x0_init_wcid_attr_mem(dev); - if (ret) - return ret; - - mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | - MT_BEACON_TIME_CFG_SYNC_MODE | - MT_BEACON_TIME_CFG_TBTT_EN | - MT_BEACON_TIME_CFG_BEACON_TX)); + for (i = 0; i < 256; i++) + mt76x02_mac_wcid_setup(dev, i, 0, NULL); mt76x0_reset_counters(dev); @@ -317,6 +256,7 @@ int mt76x0_init_hardware(struct mt76x02_dev *dev) return ret; mt76x0_phy_init(dev); + mt76x02_init_beacon_config(dev); return 0; } @@ -339,7 +279,6 @@ mt76x0_alloc_device(struct device *pdev, dev = container_of(mdev, struct mt76x02_dev, mt76); mutex_init(&dev->phy_mutex); - atomic_set(&dev->avg_ampdu_len, 1); return dev; } @@ -347,49 +286,21 @@ EXPORT_SYMBOL_GPL(mt76x0_alloc_device); int mt76x0_register_device(struct mt76x02_dev *dev) { - struct mt76_dev *mdev = &dev->mt76; - struct ieee80211_hw *hw = mdev->hw; - struct wiphy *wiphy = hw->wiphy; int ret; - /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to - * entry no. 1 like it does in the vendor driver. 
- */ - mdev->wcid_mask[0] |= 1; - - /* init fake wcid for monitor interfaces */ - mdev->global_wcid.idx = 0xff; - mdev->global_wcid.hw_key_idx = -1; - - /* init antenna configuration */ - mdev->antenna_mask = 1; - - hw->queues = 4; - hw->max_rates = 1; - hw->max_report_rates = 7; - hw->max_rate_tries = 1; - hw->extra_tx_headroom = 2; - if (mt76_is_usb(dev)) - hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + - MT_DMA_HDR_LEN; - - hw->sta_data_size = sizeof(struct mt76x02_sta); - hw->vif_data_size = sizeof(struct mt76x02_vif); - - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - - INIT_DELAYED_WORK(&dev->mac_work, mt76x0_mac_work); + mt76x02_init_device(dev); + mt76x02_config_mac_addr_list(dev); - ret = mt76_register_device(mdev, true, mt76x02_rates, + ret = mt76_register_device(&dev->mt76, true, mt76x02_rates, ARRAY_SIZE(mt76x02_rates)); if (ret) return ret; /* overwrite unsupported features */ - if (mdev->cap.has_5ghz) + if (dev->mt76.cap.has_5ghz) mt76x0_vht_cap_mask(&dev->mt76.sband_5g.sband); - mt76x0_init_debugfs(dev); + mt76x02_init_debugfs(dev); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h index 236dce6860b4..a1657922758e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals.h @@ -37,14 +37,14 @@ static const struct mt76_reg_pair common_mac_reg_table[] = { { MT_PBF_RX_MAX_PCNT, 0x0000fe9f }, { MT_TX_RETRY_CFG, 0x47d01f0f }, { MT_AUTO_RSP_CFG, 0x00000013 }, - { MT_CCK_PROT_CFG, 0x05740003 }, - { MT_OFDM_PROT_CFG, 0x05740003 }, + { MT_CCK_PROT_CFG, 0x07f40003 }, + { MT_OFDM_PROT_CFG, 0x07f42004 }, { MT_PBF_CFG, 0x00f40006 }, { MT_WPDMA_GLO_CFG, 0x00000030 }, - { MT_GF20_PROT_CFG, 0x01744004 }, - { MT_GF40_PROT_CFG, 0x03f44084 }, - { MT_MM20_PROT_CFG, 0x01744004 }, - { MT_MM40_PROT_CFG, 0x03f54084 }, + { MT_GF20_PROT_CFG, 0x01742004 }, + { MT_GF40_PROT_CFG, 0x03f42084 }, + { MT_MM20_PROT_CFG, 0x01742004 }, + { MT_MM40_PROT_CFG, 0x03f42084 }, { MT_TXOP_CTRL_CFG, 0x0000583f }, { MT_TX_RTS_CFG, 0x00092b20 }, { MT_EXP_ACK_TIME, 0x002400ca }, @@ -85,6 +85,9 @@ static const struct mt76_reg_pair mt76x0_mac_reg_table[] = { { MT_HT_CTRL_CFG, 0x000001FF }, { MT_TXOP_HLDR_ET, 0x00000000 }, { MT_PN_PAD_MODE, 0x00000003 }, + { MT_TX_PROT_CFG6, 0xe3f42004 }, + { MT_TX_PROT_CFG7, 0xe3f42084 }, + { MT_TX_PROT_CFG8, 0xe3f42104 }, }; static const struct mt76_reg_pair mt76x0_bbp_init_tab[] = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h index 95d43efc1f3d..56c6fa73daf5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/initvals_phy.h @@ -16,757 +16,626 @@ #ifndef __MT76X0U_PHY_INITVALS_H #define __MT76X0U_PHY_INITVALS_H -#define RF_REG_PAIR(bank, reg, value) \ - { (bank) << 16 | (reg), value } - - static const struct mt76_reg_pair mt76x0_rf_central_tab[] = { -/* - Bank 0 - For central blocks: BG, PLL, XTAL, LO, ADC/DAC -*/ - { MT_RF(0, 1), 0x01}, - { MT_RF(0, 2), 0x11}, - - /* - R3 ~ R7: VCO Cal. 
- */ - { MT_RF(0, 3), 0x73}, /* VCO Freq Cal - No Bypass, VCO Amp Cal - No Bypass */ - { MT_RF(0, 4), 0x30}, /* R4 b<7>=1, VCO cal */ - { MT_RF(0, 5), 0x00}, - { MT_RF(0, 6), 0x41}, /* Set the open loop amplitude to middle since bypassing amplitude calibration */ - { MT_RF(0, 7), 0x00}, - - /* - XO - */ - { MT_RF(0, 8), 0x00}, - { MT_RF(0, 9), 0x00}, - { MT_RF(0, 10), 0x0C}, - { MT_RF(0, 11), 0x00}, - { MT_RF(0, 12), 0x00}, - - /* - BG - */ - { MT_RF(0, 13), 0x00}, - { MT_RF(0, 14), 0x00}, - { MT_RF(0, 15), 0x00}, - - /* - LDO - */ - { MT_RF(0, 19), 0x20}, - /* - XO - */ - { MT_RF(0, 20), 0x22}, - { MT_RF(0, 21), 0x12}, - { MT_RF(0, 23), 0x00}, - { MT_RF(0, 24), 0x33}, /* See band selection for R24<1:0> */ - { MT_RF(0, 25), 0x00}, - - /* - PLL, See Freq Selection - */ - { MT_RF(0, 26), 0x00}, - { MT_RF(0, 27), 0x00}, - { MT_RF(0, 28), 0x00}, - { MT_RF(0, 29), 0x00}, - { MT_RF(0, 30), 0x00}, - { MT_RF(0, 31), 0x00}, - { MT_RF(0, 32), 0x00}, - { MT_RF(0, 33), 0x00}, - { MT_RF(0, 34), 0x00}, - { MT_RF(0, 35), 0x00}, - { MT_RF(0, 36), 0x00}, - { MT_RF(0, 37), 0x00}, - - /* - LO Buffer - */ - { MT_RF(0, 38), 0x2F}, - - /* - Test Ports - */ - { MT_RF(0, 64), 0x00}, - { MT_RF(0, 65), 0x80}, - { MT_RF(0, 66), 0x01}, - { MT_RF(0, 67), 0x04}, - - /* - ADC/DAC - */ - { MT_RF(0, 68), 0x00}, - { MT_RF(0, 69), 0x08}, - { MT_RF(0, 70), 0x08}, - { MT_RF(0, 71), 0x40}, - { MT_RF(0, 72), 0xD0}, - { MT_RF(0, 73), 0x93}, + { MT_RF(0, 1), 0x01 }, + { MT_RF(0, 2), 0x11 }, + /* R3 ~ R7: VCO Cal */ + { MT_RF(0, 3), 0x73 }, /* VCO Freq Cal */ + { MT_RF(0, 4), 0x30 }, /* R4 b<7>=1, VCO cal */ + { MT_RF(0, 5), 0x00 }, + { MT_RF(0, 6), 0x41 }, + { MT_RF(0, 7), 0x00 }, + { MT_RF(0, 8), 0x00 }, + { MT_RF(0, 9), 0x00 }, + { MT_RF(0, 10), 0x0C }, + { MT_RF(0, 11), 0x00 }, + { MT_RF(0, 12), 0x00 }, + /* BG */ + { MT_RF(0, 13), 0x00 }, + { MT_RF(0, 14), 0x00 }, + { MT_RF(0, 15), 0x00 }, + /* LDO */ + { MT_RF(0, 19), 0x20 }, + { MT_RF(0, 20), 0x22 }, + { MT_RF(0, 21), 0x12 }, + { MT_RF(0, 23), 0x00 }, + { MT_RF(0, 24), 0x33 }, + { MT_RF(0, 25), 0x00 }, + /* PLL */ + { MT_RF(0, 26), 0x00 }, + { MT_RF(0, 27), 0x00 }, + { MT_RF(0, 28), 0x00 }, + { MT_RF(0, 29), 0x00 }, + { MT_RF(0, 30), 0x00 }, + { MT_RF(0, 31), 0x00 }, + { MT_RF(0, 32), 0x00 }, + { MT_RF(0, 33), 0x00 }, + { MT_RF(0, 34), 0x00 }, + { MT_RF(0, 35), 0x00 }, + { MT_RF(0, 36), 0x00 }, + { MT_RF(0, 37), 0x00 }, + /* LO Buffer */ + { MT_RF(0, 38), 0x2F }, + /* Test Ports */ + { MT_RF(0, 64), 0x00 }, + { MT_RF(0, 65), 0x80 }, + { MT_RF(0, 66), 0x01 }, + { MT_RF(0, 67), 0x04 }, + /* ADC-DAC */ + { MT_RF(0, 68), 0x00 }, + { MT_RF(0, 69), 0x08 }, + { MT_RF(0, 70), 0x08 }, + { MT_RF(0, 71), 0x40 }, + { MT_RF(0, 72), 0xD0 }, + { MT_RF(0, 73), 0x93 }, }; static const struct mt76_reg_pair mt76x0_rf_2g_channel_0_tab[] = { -/* - Bank 5 - Channel 0 2G RF registers -*/ - /* - RX logic operation - */ - /* RF_R00 Change in SelectBand6590 */ - - { MT_RF(5, 2), 0x0C}, /* 5G+2G (MT7610U) */ - { MT_RF(5, 3), 0x00}, - - /* - TX logic operation - */ - { MT_RF(5, 4), 0x00}, - { MT_RF(5, 5), 0x84}, - { MT_RF(5, 6), 0x02}, - - /* - LDO - */ - { MT_RF(5, 7), 0x00}, - { MT_RF(5, 8), 0x00}, - { MT_RF(5, 9), 0x00}, - - /* - RX - */ - { MT_RF(5, 10), 0x51}, - { MT_RF(5, 11), 0x22}, - { MT_RF(5, 12), 0x22}, - { MT_RF(5, 13), 0x0F}, - { MT_RF(5, 14), 0x47}, /* Increase mixer current for more gain */ - { MT_RF(5, 15), 0x25}, - { MT_RF(5, 16), 0xC7}, /* Tune LNA2 tank */ - { MT_RF(5, 17), 0x00}, - { MT_RF(5, 18), 0x00}, - { MT_RF(5, 19), 0x30}, /* Improve max Pin */ - { MT_RF(5, 20), 0x33}, - { 
MT_RF(5, 21), 0x02}, - { MT_RF(5, 22), 0x32}, /* Tune LNA1 tank */ - { MT_RF(5, 23), 0x00}, - { MT_RF(5, 24), 0x25}, - { MT_RF(5, 26), 0x00}, - { MT_RF(5, 27), 0x12}, - { MT_RF(5, 28), 0x0F}, - { MT_RF(5, 29), 0x00}, - - /* - LOGEN - */ - { MT_RF(5, 30), 0x51}, /* Tune LOGEN tank */ - { MT_RF(5, 31), 0x35}, - { MT_RF(5, 32), 0x31}, - { MT_RF(5, 33), 0x31}, - { MT_RF(5, 34), 0x34}, - { MT_RF(5, 35), 0x03}, - { MT_RF(5, 36), 0x00}, - - /* - TX - */ - { MT_RF(5, 37), 0xDD}, /* Improve 3.2GHz spur */ - { MT_RF(5, 38), 0xB3}, - { MT_RF(5, 39), 0x33}, - { MT_RF(5, 40), 0xB1}, - { MT_RF(5, 41), 0x71}, - { MT_RF(5, 42), 0xF2}, - { MT_RF(5, 43), 0x47}, - { MT_RF(5, 44), 0x77}, - { MT_RF(5, 45), 0x0E}, - { MT_RF(5, 46), 0x10}, - { MT_RF(5, 47), 0x00}, - { MT_RF(5, 48), 0x53}, - { MT_RF(5, 49), 0x03}, - { MT_RF(5, 50), 0xEF}, - { MT_RF(5, 51), 0xC7}, - { MT_RF(5, 52), 0x62}, - { MT_RF(5, 53), 0x62}, - { MT_RF(5, 54), 0x00}, - { MT_RF(5, 55), 0x00}, - { MT_RF(5, 56), 0x0F}, - { MT_RF(5, 57), 0x0F}, - { MT_RF(5, 58), 0x16}, - { MT_RF(5, 59), 0x16}, - { MT_RF(5, 60), 0x10}, - { MT_RF(5, 61), 0x10}, - { MT_RF(5, 62), 0xD0}, - { MT_RF(5, 63), 0x6C}, - { MT_RF(5, 64), 0x58}, - { MT_RF(5, 65), 0x58}, - { MT_RF(5, 66), 0xF2}, - { MT_RF(5, 67), 0xE8}, - { MT_RF(5, 68), 0xF0}, - { MT_RF(5, 69), 0xF0}, - { MT_RF(5, 127), 0x04}, + /* RX logic operation */ + { MT_RF(5, 2), 0x0C }, /* 5G+2G */ + { MT_RF(5, 3), 0x00 }, + /* TX logic operation */ + { MT_RF(5, 4), 0x00 }, + { MT_RF(5, 5), 0x84 }, + { MT_RF(5, 6), 0x02 }, + /* LDO */ + { MT_RF(5, 7), 0x00 }, + { MT_RF(5, 8), 0x00 }, + { MT_RF(5, 9), 0x00 }, + /* RX */ + { MT_RF(5, 10), 0x51 }, + { MT_RF(5, 11), 0x22 }, + { MT_RF(5, 12), 0x22 }, + { MT_RF(5, 13), 0x0F }, + { MT_RF(5, 14), 0x47 }, + { MT_RF(5, 15), 0x25 }, + { MT_RF(5, 16), 0xC7 }, + { MT_RF(5, 17), 0x00 }, + { MT_RF(5, 18), 0x00 }, + { MT_RF(5, 19), 0x30 }, + { MT_RF(5, 20), 0x33 }, + { MT_RF(5, 21), 0x02 }, + { MT_RF(5, 22), 0x32 }, + { MT_RF(5, 23), 0x00 }, + { MT_RF(5, 24), 0x25 }, + { MT_RF(5, 26), 0x00 }, + { MT_RF(5, 27), 0x12 }, + { MT_RF(5, 28), 0x0F }, + { MT_RF(5, 29), 0x00 }, + /* LOGEN */ + { MT_RF(5, 30), 0x51 }, + { MT_RF(5, 31), 0x35 }, + { MT_RF(5, 32), 0x31 }, + { MT_RF(5, 33), 0x31 }, + { MT_RF(5, 34), 0x34 }, + { MT_RF(5, 35), 0x03 }, + { MT_RF(5, 36), 0x00 }, + /* TX */ + { MT_RF(5, 37), 0xDD }, + { MT_RF(5, 38), 0xB3 }, + { MT_RF(5, 39), 0x33 }, + { MT_RF(5, 40), 0xB1 }, + { MT_RF(5, 41), 0x71 }, + { MT_RF(5, 42), 0xF2 }, + { MT_RF(5, 43), 0x47 }, + { MT_RF(5, 44), 0x77 }, + { MT_RF(5, 45), 0x0E }, + { MT_RF(5, 46), 0x10 }, + { MT_RF(5, 47), 0x00 }, + { MT_RF(5, 48), 0x53 }, + { MT_RF(5, 49), 0x03 }, + { MT_RF(5, 50), 0xEF }, + { MT_RF(5, 51), 0xC7 }, + { MT_RF(5, 52), 0x62 }, + { MT_RF(5, 53), 0x62 }, + { MT_RF(5, 54), 0x00 }, + { MT_RF(5, 55), 0x00 }, + { MT_RF(5, 56), 0x0F }, + { MT_RF(5, 57), 0x0F }, + { MT_RF(5, 58), 0x16 }, + { MT_RF(5, 59), 0x16 }, + { MT_RF(5, 60), 0x10 }, + { MT_RF(5, 61), 0x10 }, + { MT_RF(5, 62), 0xD0 }, + { MT_RF(5, 63), 0x6C }, + { MT_RF(5, 64), 0x58 }, + { MT_RF(5, 65), 0x58 }, + { MT_RF(5, 66), 0xF2 }, + { MT_RF(5, 67), 0xE8 }, + { MT_RF(5, 68), 0xF0 }, + { MT_RF(5, 69), 0xF0 }, + { MT_RF(5, 127), 0x04 }, }; static const struct mt76_reg_pair mt76x0_rf_5g_channel_0_tab[] = { -/* - Bank 6 - Channel 0 5G RF registers -*/ - /* - RX logic operation - */ - /* RF_R00 Change in SelectBandmt76x0 */ - - { MT_RF(6, 2), 0x0C}, - { MT_RF(6, 3), 0x00}, - - /* - TX logic operation - */ - { MT_RF(6, 4), 0x00}, - { MT_RF(6, 5), 0x84}, - { MT_RF(6, 6), 0x02}, - - /* - 
LDO - */ - { MT_RF(6, 7), 0x00}, - { MT_RF(6, 8), 0x00}, - { MT_RF(6, 9), 0x00}, - - /* - RX - */ - { MT_RF(6, 10), 0x00}, - { MT_RF(6, 11), 0x01}, - - { MT_RF(6, 13), 0x23}, - { MT_RF(6, 14), 0x00}, - { MT_RF(6, 15), 0x04}, - { MT_RF(6, 16), 0x22}, - - { MT_RF(6, 18), 0x08}, - { MT_RF(6, 19), 0x00}, - { MT_RF(6, 20), 0x00}, - { MT_RF(6, 21), 0x00}, - { MT_RF(6, 22), 0xFB}, - - /* - LOGEN5G - */ - { MT_RF(6, 25), 0x76}, - { MT_RF(6, 26), 0x24}, - { MT_RF(6, 27), 0x04}, - { MT_RF(6, 28), 0x00}, - { MT_RF(6, 29), 0x00}, - - /* - TX - */ - { MT_RF(6, 37), 0xBB}, - { MT_RF(6, 38), 0xB3}, - - { MT_RF(6, 40), 0x33}, - { MT_RF(6, 41), 0x33}, - - { MT_RF(6, 43), 0x03}, - { MT_RF(6, 44), 0xB3}, - - { MT_RF(6, 46), 0x17}, - { MT_RF(6, 47), 0x0E}, - { MT_RF(6, 48), 0x10}, - { MT_RF(6, 49), 0x07}, - - { MT_RF(6, 62), 0x00}, - { MT_RF(6, 63), 0x00}, - { MT_RF(6, 64), 0xF1}, - { MT_RF(6, 65), 0x0F}, + /* RX logic operation */ + { MT_RF(6, 2), 0x0C }, + { MT_RF(6, 3), 0x00 }, + /* TX logic operation */ + { MT_RF(6, 4), 0x00 }, + { MT_RF(6, 5), 0x84 }, + { MT_RF(6, 6), 0x02 }, + /* LDO */ + { MT_RF(6, 7), 0x00 }, + { MT_RF(6, 8), 0x00 }, + { MT_RF(6, 9), 0x00 }, + /* RX */ + { MT_RF(6, 10), 0x00 }, + { MT_RF(6, 11), 0x01 }, + { MT_RF(6, 13), 0x23 }, + { MT_RF(6, 14), 0x00 }, + { MT_RF(6, 15), 0x04 }, + { MT_RF(6, 16), 0x22 }, + { MT_RF(6, 18), 0x08 }, + { MT_RF(6, 19), 0x00 }, + { MT_RF(6, 20), 0x00 }, + { MT_RF(6, 21), 0x00 }, + { MT_RF(6, 22), 0xFB }, + /* LOGEN5G */ + { MT_RF(6, 25), 0x76 }, + { MT_RF(6, 26), 0x24 }, + { MT_RF(6, 27), 0x04 }, + { MT_RF(6, 28), 0x00 }, + { MT_RF(6, 29), 0x00 }, + /* TX */ + { MT_RF(6, 37), 0xBB }, + { MT_RF(6, 38), 0xB3 }, + { MT_RF(6, 40), 0x33 }, + { MT_RF(6, 41), 0x33 }, + { MT_RF(6, 43), 0x03 }, + { MT_RF(6, 44), 0xB3 }, + { MT_RF(6, 46), 0x17 }, + { MT_RF(6, 47), 0x0E }, + { MT_RF(6, 48), 0x10 }, + { MT_RF(6, 49), 0x07 }, + { MT_RF(6, 62), 0x00 }, + { MT_RF(6, 63), 0x00 }, + { MT_RF(6, 64), 0xF1 }, + { MT_RF(6, 65), 0x0F }, }; static const struct mt76_reg_pair mt76x0_rf_vga_channel_0_tab[] = { -/* - Bank 7 - Channel 0 VGA RF registers -*/ /* E3 CR */ - { MT_RF(7, 0), 0x47}, /* Allow BBP/MAC to do calibration */ - { MT_RF(7, 1), 0x00}, - { MT_RF(7, 2), 0x00}, - { MT_RF(7, 3), 0x00}, - { MT_RF(7, 4), 0x00}, - - { MT_RF(7, 10), 0x13}, - { MT_RF(7, 11), 0x0F}, - { MT_RF(7, 12), 0x13}, /* For dcoc */ - { MT_RF(7, 13), 0x13}, /* For dcoc */ - { MT_RF(7, 14), 0x13}, /* For dcoc */ - { MT_RF(7, 15), 0x20}, /* For dcoc */ - { MT_RF(7, 16), 0x22}, /* For dcoc */ - - { MT_RF(7, 17), 0x7C}, - - { MT_RF(7, 18), 0x00}, - { MT_RF(7, 19), 0x00}, - { MT_RF(7, 20), 0x00}, - { MT_RF(7, 21), 0xF1}, - { MT_RF(7, 22), 0x11}, - { MT_RF(7, 23), 0xC2}, - { MT_RF(7, 24), 0x41}, - { MT_RF(7, 25), 0x20}, - { MT_RF(7, 26), 0x40}, - { MT_RF(7, 27), 0xD7}, - { MT_RF(7, 28), 0xA2}, - { MT_RF(7, 29), 0x60}, - { MT_RF(7, 30), 0x49}, - { MT_RF(7, 31), 0x20}, - { MT_RF(7, 32), 0x44}, - { MT_RF(7, 33), 0xC1}, - { MT_RF(7, 34), 0x60}, - { MT_RF(7, 35), 0xC0}, - - { MT_RF(7, 61), 0x01}, - - { MT_RF(7, 72), 0x3C}, - { MT_RF(7, 73), 0x34}, - { MT_RF(7, 74), 0x00}, + { MT_RF(7, 0), 0x47 }, + { MT_RF(7, 1), 0x00 }, + { MT_RF(7, 2), 0x00 }, + { MT_RF(7, 3), 0x00 }, + { MT_RF(7, 4), 0x00 }, + { MT_RF(7, 10), 0x13 }, + { MT_RF(7, 11), 0x0F }, + { MT_RF(7, 12), 0x13 }, + { MT_RF(7, 13), 0x13 }, + { MT_RF(7, 14), 0x13 }, + { MT_RF(7, 15), 0x20 }, + { MT_RF(7, 16), 0x22 }, + { MT_RF(7, 17), 0x7C }, + { MT_RF(7, 18), 0x00 }, + { MT_RF(7, 19), 0x00 }, + { MT_RF(7, 20), 0x00 }, + { MT_RF(7, 21), 0xF1 }, + { MT_RF(7, 
22), 0x11 }, + { MT_RF(7, 23), 0xC2 }, + { MT_RF(7, 24), 0x41 }, + { MT_RF(7, 25), 0x20 }, + { MT_RF(7, 26), 0x40 }, + { MT_RF(7, 27), 0xD7 }, + { MT_RF(7, 28), 0xA2 }, + { MT_RF(7, 29), 0x60 }, + { MT_RF(7, 30), 0x49 }, + { MT_RF(7, 31), 0x20 }, + { MT_RF(7, 32), 0x44 }, + { MT_RF(7, 33), 0xC1 }, + { MT_RF(7, 34), 0x60 }, + { MT_RF(7, 35), 0xC0 }, + { MT_RF(7, 61), 0x01 }, + { MT_RF(7, 72), 0x3C }, + { MT_RF(7, 73), 0x34 }, + { MT_RF(7, 74), 0x00 }, }; static const struct mt76x0_rf_switch_item mt76x0_rf_bw_switch_tab[] = { - /* Bank, Register, Bw/Band, Value */ - { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00}, - { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00}, - { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00}, - { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00}, - { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00}, - - /* TODO: need to check B7.R6 & B7.R7 setting for 2.4G again @20121112 */ - { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C}, - { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20}, - { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10}, - - { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20}, - { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20}, - { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10}, - - { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03}, - { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01}, - { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03}, - { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01}, - { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00}, - - /* TODO: need to check B7.R58 & B7.R59 setting for 2.4G again @20121112 */ - { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40}, - { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40}, - { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10}, - - { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40}, - { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40}, - { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40}, - { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10}, - - { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA}, - { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA}, - { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA}, - { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA}, - { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA}, - - { MT_RF(7, 76), RF_BW_20, 0x40}, - { MT_RF(7, 76), RF_BW_40, 0x40}, - { MT_RF(7, 76), RF_BW_80, 0x10}, - - { MT_RF(7, 77), RF_BW_20, 0x40}, - { MT_RF(7, 77), RF_BW_40, 0x40}, - { MT_RF(7, 77), RF_BW_80, 0x10}, + /* bank, reg bw/band value */ + { MT_RF(0, 17), RF_G_BAND | RF_BW_20, 0x00 }, + { MT_RF(0, 17), RF_G_BAND | RF_BW_40, 0x00 }, + { MT_RF(0, 17), RF_A_BAND | RF_BW_20, 0x00 }, + { MT_RF(0, 17), RF_A_BAND | RF_BW_40, 0x00 }, + { MT_RF(0, 17), RF_A_BAND | RF_BW_80, 0x00 }, + { MT_RF(7, 6), RF_G_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 6), RF_G_BAND | RF_BW_40, 0x1C }, + { MT_RF(7, 6), RF_A_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 6), RF_A_BAND | RF_BW_40, 0x20 }, + { MT_RF(7, 6), RF_A_BAND | RF_BW_80, 0x10 }, + { MT_RF(7, 7), RF_G_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 7), RF_G_BAND | RF_BW_40, 0x20 }, + { MT_RF(7, 7), RF_A_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 7), RF_A_BAND | RF_BW_40, 0x20 }, + { MT_RF(7, 7), RF_A_BAND | RF_BW_80, 0x10 }, + { MT_RF(7, 8), RF_G_BAND | RF_BW_20, 0x03 }, + { MT_RF(7, 8), RF_G_BAND | RF_BW_40, 0x01 }, + { MT_RF(7, 8), RF_A_BAND | RF_BW_20, 0x03 }, + { MT_RF(7, 8), RF_A_BAND | RF_BW_40, 0x01 }, + { MT_RF(7, 8), RF_A_BAND | RF_BW_80, 0x00 }, 
+ { MT_RF(7, 58), RF_G_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 58), RF_G_BAND | RF_BW_40, 0x40 }, + { MT_RF(7, 58), RF_A_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 58), RF_A_BAND | RF_BW_40, 0x40 }, + { MT_RF(7, 58), RF_A_BAND | RF_BW_80, 0x10 }, + { MT_RF(7, 59), RF_G_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 59), RF_G_BAND | RF_BW_40, 0x40 }, + { MT_RF(7, 59), RF_A_BAND | RF_BW_20, 0x40 }, + { MT_RF(7, 59), RF_A_BAND | RF_BW_40, 0x40 }, + { MT_RF(7, 59), RF_A_BAND | RF_BW_80, 0x10 }, + { MT_RF(7, 60), RF_G_BAND | RF_BW_20, 0xAA }, + { MT_RF(7, 60), RF_G_BAND | RF_BW_40, 0xAA }, + { MT_RF(7, 60), RF_A_BAND | RF_BW_20, 0xAA }, + { MT_RF(7, 60), RF_A_BAND | RF_BW_40, 0xAA }, + { MT_RF(7, 60), RF_A_BAND | RF_BW_80, 0xAA }, + { MT_RF(7, 76), RF_BW_20, 0x40 }, + { MT_RF(7, 76), RF_BW_40, 0x40 }, + { MT_RF(7, 76), RF_BW_80, 0x10 }, + { MT_RF(7, 77), RF_BW_20, 0x40 }, + { MT_RF(7, 77), RF_BW_40, 0x40 }, + { MT_RF(7, 77), RF_BW_80, 0x10 }, }; static const struct mt76x0_rf_switch_item mt76x0_rf_band_switch_tab[] = { - /* Bank, Register, Bw/Band, Value */ - { MT_RF(0, 16), RF_G_BAND, 0x20}, - { MT_RF(0, 16), RF_A_BAND, 0x20}, - - { MT_RF(0, 18), RF_G_BAND, 0x00}, - { MT_RF(0, 18), RF_A_BAND, 0x00}, - - { MT_RF(0, 39), RF_G_BAND, 0x36}, - { MT_RF(0, 39), RF_A_BAND_LB, 0x34}, - { MT_RF(0, 39), RF_A_BAND_MB, 0x33}, - { MT_RF(0, 39), RF_A_BAND_HB, 0x31}, - { MT_RF(0, 39), RF_A_BAND_11J, 0x36}, - - { MT_RF(6, 12), RF_A_BAND_LB, 0x44}, - { MT_RF(6, 12), RF_A_BAND_MB, 0x44}, - { MT_RF(6, 12), RF_A_BAND_HB, 0x55}, - { MT_RF(6, 12), RF_A_BAND_11J, 0x44}, - - { MT_RF(6, 17), RF_A_BAND_LB, 0x02}, - { MT_RF(6, 17), RF_A_BAND_MB, 0x00}, - { MT_RF(6, 17), RF_A_BAND_HB, 0x00}, - { MT_RF(6, 17), RF_A_BAND_11J, 0x05}, - - { MT_RF(6, 24), RF_A_BAND_LB, 0xA1}, - { MT_RF(6, 24), RF_A_BAND_MB, 0x41}, - { MT_RF(6, 24), RF_A_BAND_HB, 0x21}, - { MT_RF(6, 24), RF_A_BAND_11J, 0xE1}, - - { MT_RF(6, 39), RF_A_BAND_LB, 0x36}, - { MT_RF(6, 39), RF_A_BAND_MB, 0x34}, - { MT_RF(6, 39), RF_A_BAND_HB, 0x32}, - { MT_RF(6, 39), RF_A_BAND_11J, 0x37}, - - { MT_RF(6, 42), RF_A_BAND_LB, 0xFB}, - { MT_RF(6, 42), RF_A_BAND_MB, 0xF3}, - { MT_RF(6, 42), RF_A_BAND_HB, 0xEB}, - { MT_RF(6, 42), RF_A_BAND_11J, 0xEB}, - - /* Move R6-R45, R50~R59 to mt76x0_RF_INT_PA_5G_Channel_0_RegTb/mt76x0_RF_EXT_PA_5G_Channel_0_RegTb */ - - { MT_RF(6, 127), RF_G_BAND, 0x84}, - { MT_RF(6, 127), RF_A_BAND, 0x04}, - - { MT_RF(7, 5), RF_G_BAND, 0x40}, - { MT_RF(7, 5), RF_A_BAND, 0x00}, - - { MT_RF(7, 9), RF_G_BAND, 0x00}, - { MT_RF(7, 9), RF_A_BAND, 0x00}, - - { MT_RF(7, 70), RF_G_BAND, 0x00}, - { MT_RF(7, 70), RF_A_BAND, 0x6D}, - - { MT_RF(7, 71), RF_G_BAND, 0x00}, - { MT_RF(7, 71), RF_A_BAND, 0xB0}, - - { MT_RF(7, 78), RF_G_BAND, 0x00}, - { MT_RF(7, 78), RF_A_BAND, 0x55}, - - { MT_RF(7, 79), RF_G_BAND, 0x00}, - { MT_RF(7, 79), RF_A_BAND, 0x55}, + /* bank, reg bw/band value */ + { MT_RF(0, 16), RF_G_BAND, 0x20 }, + { MT_RF(0, 16), RF_A_BAND, 0x20 }, + { MT_RF(0, 18), RF_G_BAND, 0x00 }, + { MT_RF(0, 18), RF_A_BAND, 0x00 }, + { MT_RF(0, 39), RF_G_BAND, 0x36 }, + { MT_RF(0, 39), RF_A_BAND_LB, 0x34 }, + { MT_RF(0, 39), RF_A_BAND_MB, 0x33 }, + { MT_RF(0, 39), RF_A_BAND_HB, 0x31 }, + { MT_RF(0, 39), RF_A_BAND_11J, 0x36 }, + { MT_RF(6, 12), RF_A_BAND_LB, 0x44 }, + { MT_RF(6, 12), RF_A_BAND_MB, 0x44 }, + { MT_RF(6, 12), RF_A_BAND_HB, 0x55 }, + { MT_RF(6, 12), RF_A_BAND_11J, 0x44 }, + { MT_RF(6, 17), RF_A_BAND_LB, 0x02 }, + { MT_RF(6, 17), RF_A_BAND_MB, 0x00 }, + { MT_RF(6, 17), RF_A_BAND_HB, 0x00 }, + { MT_RF(6, 17), RF_A_BAND_11J, 0x05 }, + { MT_RF(6, 24), RF_A_BAND_LB, 0xA1 }, + { 
MT_RF(6, 24), RF_A_BAND_MB, 0x41 }, + { MT_RF(6, 24), RF_A_BAND_HB, 0x21 }, + { MT_RF(6, 24), RF_A_BAND_11J, 0xE1 }, + { MT_RF(6, 39), RF_A_BAND_LB, 0x36 }, + { MT_RF(6, 39), RF_A_BAND_MB, 0x34 }, + { MT_RF(6, 39), RF_A_BAND_HB, 0x32 }, + { MT_RF(6, 39), RF_A_BAND_11J, 0x37 }, + { MT_RF(6, 42), RF_A_BAND_LB, 0xFB }, + { MT_RF(6, 42), RF_A_BAND_MB, 0xF3 }, + { MT_RF(6, 42), RF_A_BAND_HB, 0xEB }, + { MT_RF(6, 42), RF_A_BAND_11J, 0xEB }, + { MT_RF(6, 127), RF_G_BAND, 0x84 }, + { MT_RF(6, 127), RF_A_BAND, 0x04 }, + { MT_RF(7, 5), RF_G_BAND, 0x40 }, + { MT_RF(7, 5), RF_A_BAND, 0x00 }, + { MT_RF(7, 9), RF_G_BAND, 0x00 }, + { MT_RF(7, 9), RF_A_BAND, 0x00 }, + { MT_RF(7, 70), RF_G_BAND, 0x00 }, + { MT_RF(7, 70), RF_A_BAND, 0x6D }, + { MT_RF(7, 71), RF_G_BAND, 0x00 }, + { MT_RF(7, 71), RF_A_BAND, 0xB0 }, + { MT_RF(7, 78), RF_G_BAND, 0x00 }, + { MT_RF(7, 78), RF_A_BAND, 0x55 }, + { MT_RF(7, 79), RF_G_BAND, 0x00 }, + { MT_RF(7, 79), RF_A_BAND, 0x55 }, }; static const struct mt76x0_freq_item mt76x0_frequency_plan[] = { - {1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2412 */ - {2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1}, /* Freq 2417 */ - {3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2422 */ - {4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0}, /* Freq 2427 */ - {5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2432 */ - {6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1}, /* Freq 2437 */ - {7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2442 */ - {8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1}, /* Freq 2447 */ - {9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 2452 */ - {10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0}, /* Freq 2457 */ - {11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2462 */ - {12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1}, /* Freq 2467 */ - {13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2472 */ - {14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 2484 */ - - {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3}, /* Freq 4915 */ - {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4920 */ - {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4925 */ - {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4935 */ - {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4940 */ - {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 
0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4945 */ - {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4960 */ - {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3}, /* Freq 4980 */ - - {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5180 */ - {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5185 */ - {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5190 */ - {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5195 */ - {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5200 */ - {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5205 */ - {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5210 */ - {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5215 */ - {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5220 */ - {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5225 */ - {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5230 */ - {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5235 */ - {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5240 */ - {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5245 */ - {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5250 */ - {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5255 */ - {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5260 */ - {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5265 */ - {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5270 */ - {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3}, /* Freq 5275 */ - {56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5280 */ - {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5285 */ - {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 
0x30, 0, 0, 0x3}, /* Freq 5290 */ - {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5295 */ - {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5300 */ - {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5305 */ - {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5310 */ - {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5315 */ - {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3}, /* Freq 5320 */ - - {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5500 */ - {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5505 */ - {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5510 */ - {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3}, /* Freq 5515 */ - {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5520 */ - {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5525 */ - {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5530 */ - {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5535 */ - {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5540 */ - {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5545 */ - {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5550 */ - {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5555 */ - {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5560 */ - {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5565 */ - {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5570 */ - {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5575 */ - {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5580 */ - {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5585 */ - {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 
0, 0x3}, /* Freq 5590 */ - {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5595 */ - {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5600 */ - {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5605 */ - {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5610 */ - {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5615 */ - {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5620 */ - {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5625 */ - {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5630 */ - {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3}, /* Freq 5635 */ - {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5640 */ - {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5645 */ - {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5650 */ - {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5655 */ - {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5660 */ - {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5665 */ - {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5670 */ - {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5675 */ - {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5680 */ - - {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5685 */ - {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5690 */ - {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5695 */ - {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5700 */ - {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5705 */ - {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5710 */ - {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 
0x3}, /* Freq 5715 */ - {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5720 */ - {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5725 */ - {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5730 */ - {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5735 */ - {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5740 */ - {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5745 */ - {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5750 */ - {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3}, /* Freq 5755 */ - {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5760 */ - {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5765 */ - {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5770 */ - {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5775 */ - {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5780 */ - {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5785 */ - {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5790 */ - {159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5795 */ - {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5800 */ - {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5805 */ - {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5810 */ - {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5815 */ - {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5820 */ - {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5825 */ - {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5830 */ - {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5835 */ - {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 
0x3}, /* Freq 5840 */ - {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5845 */ - {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5850 */ - {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5855 */ - {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5860 */ - {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3}, /* Freq 5865 */ + { 1, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2412 */ + { 2, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA1, 0, 0x30, 0, 0, 0x1 }, /* Freq 2417 */ + { 3, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xE2, 0x40, 0x07, 0x40, 0x0B, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2422 */ + { 4, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x50, 0, 0x30, 0, 0, 0x0 }, /* Freq 2427 */ + { 5, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2432 */ + { 6, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA2, 0, 0x30, 0, 0, 0x1 }, /* Freq 2437 */ + { 7, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xE2, 0x40, 0x02, 0x40, 0x07, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2442 */ + { 8, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA3, 0, 0x30, 0, 0, 0x1 }, /* Freq 2447 */ + { 9, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xF2, 0x40, 0x07, 0x40, 0x0D, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 2452 */ + { 10, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xD4, 0x40, 0x02, 0x40, 0x09, 0, 0, 1, 0x51, 0, 0x30, 0, 0, 0x0 }, /* Freq 2457 */ + { 11, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x02, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2462 */ + { 12, RF_G_BAND, 0x02, 0x3F, 0x3C, 0xDD, 0xD4, 0x40, 0x07, 0x40, 0x07, 0, 0, 1, 0xA4, 0, 0x30, 0, 0, 0x1 }, /* Freq 2467 */ + { 13, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2472 */ + { 14, RF_G_BAND, 0x02, 0x3F, 0x28, 0xDD, 0xF2, 0x40, 0x02, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 2484 */ + { 183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x28, 0, 0x30, 0, 0, 0x3 }, /* Freq 4915 */ + { 184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x00, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4920 */ + { 185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4925 */ + { 187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4935 */ + { 188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4940 */ + { 189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4945 */ + { 192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4960 */ + { 196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 
0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x29, 0, 0x30, 0, 0, 0x3 }, /* Freq 4980 */ + { 36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5180 */ + { 37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5185 */ + { 38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5190 */ + { 39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5195 */ + { 40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5200 */ + { 41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5205 */ + { 42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5210 */ + { 43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5215 */ + { 44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5220 */ + { 45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5225 */ + { 46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5230 */ + { 47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5235 */ + { 48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5240 */ + { 49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5245 */ + { 50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5250 */ + { 51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5255 */ + { 52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5260 */ + { 53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5265 */ + { 54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5270 */ + { 55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2B, 0, 0x30, 0, 0, 0x3 }, /* Freq 5275 */ + { 56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5280 */ + { 57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5285 */ + { 58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5290 */ + { 59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5295 */ + { 60, (RF_A_BAND | 
RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5300 */ + { 61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5305 */ + { 62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5310 */ + { 63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5315 */ + { 64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2C, 0, 0x30, 0, 0, 0x3 }, /* Freq 5320 */ + { 100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5500 */ + { 101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5505 */ + { 102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5510 */ + { 103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2D, 0, 0x30, 0, 0, 0x3 }, /* Freq 5515 */ + { 104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5520 */ + { 105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5525 */ + { 106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5530 */ + { 107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5535 */ + { 108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5540 */ + { 109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5545 */ + { 110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5550 */ + { 111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5555 */ + { 112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5560 */ + { 113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5565 */ + { 114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5570 */ + { 115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5575 */ + { 116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5580 */ + { 117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5585 */ + { 118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5590 */ + { 119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 
}, /* Freq 5595 */ + { 120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5600 */ + { 121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5605 */ + { 122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5610 */ + { 123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5615 */ + { 124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5620 */ + { 125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5625 */ + { 126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5630 */ + { 127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2E, 0, 0x30, 0, 0, 0x3 }, /* Freq 5635 */ + { 128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5640 */ + { 129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5645 */ + { 130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5650 */ + { 131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5655 */ + { 132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5660 */ + { 133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5665 */ + { 134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5670 */ + { 135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5675 */ + { 136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5680 */ + { 137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5685 */ + { 138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5690 */ + { 139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5695 */ + { 140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5700 */ + { 141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5705 */ + { 142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5710 */ + { 143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5715 */ + { 144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 
0x40, 0x08, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5720 */ + { 145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5725 */ + { 146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5730 */ + { 147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5735 */ + { 148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5740 */ + { 149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5745 */ + { 150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x0B, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5750 */ + { 151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x70, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x17, 0, 0, 1, 0x2F, 0, 0x30, 0, 0, 0x3 }, /* Freq 5755 */ + { 152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x00, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5760 */ + { 153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5765 */ + { 154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x01, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5770 */ + { 155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5775 */ + { 156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x02, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5780 */ + { 157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5785 */ + { 158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x03, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5790 */ + { 159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5795 */ + { 160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x04, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5800 */ + { 161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5805 */ + { 162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x05, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5810 */ + { 163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0B, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5815 */ + { 164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x06, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5820 */ + { 165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0D, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5825 */ + { 166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0xDD, 0xD2, 0x40, 0x04, 0x40, 0x07, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5830 */ + { 167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x0F, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5835 */ + { 168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x08, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5840 */ + { 169, (RF_A_BAND | 
RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x11, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5845 */ + { 170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x09, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5850 */ + { 171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x13, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5855 */ + { 172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x30, 0x97, 0xD2, 0x40, 0x04, 0x40, 0x0A, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5860 */ + { 173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x68, 0xDD, 0xD2, 0x40, 0x10, 0x40, 0x15, 0, 0, 1, 0x30, 0, 0x30, 0, 0, 0x3 }, /* Freq 5865 */ }; static const struct mt76x0_freq_item mt76x0_sdm_frequency_plan[] = { - {1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2412 */ - {2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3}, /* Freq 2417 */ - {3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3}, /* Freq 2422 */ - {4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3}, /* Freq 2427 */ - {5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3}, /* Freq 2432 */ - {6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3}, /* Freq 2437 */ - {7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3}, /* Freq 2442 */ - {8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3}, /* Freq 2447 */ - {9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3}, /* Freq 2452 */ - {10, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3}, /* Freq 2457 */ - {11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2222, 0x3}, /* Freq 2462 */ - {12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x7777, 0x3}, /* Freq 2467 */ - {13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xCCCC, 0x3}, /* Freq 2472 */ - {14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3}, /* Freq 2484 */ - - {183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 4915 */ - {184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x0, 0x3}, /* Freq 4920 */ - {185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x2AAA, 0x3}, /* Freq 4925 */ - {187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x8000, 0x3}, /* Freq 4935 */ - {188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 4940 */ - {189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 
0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 4945 */ - {192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 4960 */ - {196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 4980 */ - - {36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xAAAA, 0x3}, /* Freq 5180 */ - {37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0xD555, 0x3}, /* Freq 5185 */ - {38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5190 */ - {39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5195 */ - {40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5200 */ - {41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5205 */ - {42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5210 */ - {43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5215 */ - {44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5220 */ - {45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5225 */ - {46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5230 */ - {47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5235 */ - {48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5240 */ - {49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5245 */ - {50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5250 */ - {51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5255 */ - {52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5260 */ - {53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5265 */ - {54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5270 */ - {55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5275 */ - {56, (RF_A_BAND | 
RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5280 */ - {57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5285 */ - {58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5290 */ - {59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5295 */ - {60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5300 */ - {61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5305 */ - {62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5310 */ - {63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5315 */ - {64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5320 */ - - {100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5500 */ - {101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5505 */ - {102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5510 */ - {103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5515 */ - {104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5520 */ - {105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5525 */ - {106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5530 */ - {107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5535 */ - {108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5540 */ - {109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5545 */ - {110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5550 */ - {111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5555 */ - {112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5560 */ - {113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 
0x18000, 0x3}, /* Freq 5565 */ - {114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5570 */ - {115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5575 */ - {116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5580 */ - {117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5585 */ - {118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5590 */ - {119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5595 */ - {120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5600 */ - {121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5605 */ - {122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5610 */ - {123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5615 */ - {124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5620 */ - {125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5625 */ - {126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5630 */ - {127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5635 */ - {128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5640 */ - {129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5645 */ - {130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5650 */ - {131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5655 */ - {132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5660 */ - {133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5665 */ - {134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5670 */ - {135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5675 */ - {136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 
0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5680 */ - - {137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5685 */ - {138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5690 */ - {139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5695 */ - {140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5700 */ - {141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5705 */ - {142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5710 */ - {143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5715 */ - {144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5720 */ - {145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5725 */ - {146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5730 */ - {147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5735 */ - {148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5740 */ - {149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5745 */ - {150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3}, /* Freq 5750 */ - {151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3}, /* Freq 5755 */ - {152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3}, /* Freq 5760 */ - {153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3}, /* Freq 5765 */ - {154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3}, /* Freq 5770 */ - {155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3}, /* Freq 5775 */ - {156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3}, /* Freq 5780 */ - {157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3}, /* Freq 5785 */ - {158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3}, /* Freq 5790 */ - {159, 
(RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3}, /* Freq 5795 */ - {160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3}, /* Freq 5800 */ - {161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3}, /* Freq 5805 */ - {162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3}, /* Freq 5810 */ - {163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3}, /* Freq 5815 */ - {164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3}, /* Freq 5820 */ - {165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3}, /* Freq 5825 */ - {166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3}, /* Freq 5830 */ - {167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3}, /* Freq 5835 */ - {168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3}, /* Freq 5840 */ - {169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3}, /* Freq 5845 */ - {170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3}, /* Freq 5850 */ - {171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3}, /* Freq 5855 */ - {172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3}, /* Freq 5860 */ - {173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0/*0 -> 1*/, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3}, /* Freq 5865 */ + { 1, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2412 */ + { 2, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x12222, 0x3 }, /* Freq 2417 */ + { 3, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x17777, 0x3 }, /* Freq 2422 */ + { 4, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x1CCCC, 0x3 }, /* Freq 2427 */ + { 5, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x22222, 0x3 }, /* Freq 2432 */ + { 6, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x27777, 0x3 }, /* Freq 2437 */ + { 7, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x2CCCC, 0x3 }, /* Freq 2442 */ + { 8, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x32222, 0x3 }, /* Freq 2447 */ + { 9, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x37777, 0x3 }, /* Freq 2452 */ + { 10, RF_G_BAND, 0x02, 0x3F, 0x7F, 
0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3CCCC, 0x3 }, /* Freq 2457 */ + { 11, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02222, 0x3 }, /* Freq 2462 */ + { 12, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x07777, 0x3 }, /* Freq 2467 */ + { 13, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0CCCC, 0x3 }, /* Freq 2472 */ + { 14, RF_G_BAND, 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x19999, 0x3 }, /* Freq 2484 */ + { 183, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x28, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 4915 */ + { 184, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 4920 */ + { 185, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 4925 */ + { 187, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 4935 */ + { 188, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 4940 */ + { 189, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 4945 */ + { 192, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 4960 */ + { 196, (RF_A_BAND | RF_A_BAND_11J), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x29, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 4980 */ + { 36, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5180 */ + { 37, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5185 */ + { 38, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5190 */ + { 39, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5195 */ + { 40, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5200 */ + { 41, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5205 */ + { 42, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5210 */ + { 43, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5215 */ + { 44, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5220 */ + { 45, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5225 */ + { 46, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5230 */ + { 47, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 
0x0, 0x8, 0x28000, 0x3 }, /* Freq 5235 */ + { 48, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5240 */ + { 49, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5245 */ + { 50, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5250 */ + { 51, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5255 */ + { 52, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5260 */ + { 53, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5265 */ + { 54, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5270 */ + { 55, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2B, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5275 */ + { 56, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5280 */ + { 57, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5285 */ + { 58, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5290 */ + { 59, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5295 */ + { 60, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5300 */ + { 61, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5305 */ + { 62, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5310 */ + { 63, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5315 */ + { 64, (RF_A_BAND | RF_A_BAND_LB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2C, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5320 */ + { 100, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5500 */ + { 101, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5505 */ + { 102, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5510 */ + { 103, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2D, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5515 */ + { 104, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5520 */ + { 105, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5525 */ + { 106, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 
0x2E, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5530 */ + { 107, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5535 */ + { 108, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5540 */ + { 109, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5545 */ + { 110, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5550 */ + { 111, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5555 */ + { 112, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5560 */ + { 113, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5565 */ + { 114, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5570 */ + { 115, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5575 */ + { 116, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5580 */ + { 117, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5585 */ + { 118, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5590 */ + { 119, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5595 */ + { 120, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5600 */ + { 121, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5605 */ + { 122, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5610 */ + { 123, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5615 */ + { 124, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5620 */ + { 125, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5625 */ + { 126, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5630 */ + { 127, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2E, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5635 */ + { 128, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5640 */ + { 129, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5645 */ + { 130, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 
0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5650 */ + { 131, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5655 */ + { 132, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5660 */ + { 133, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5665 */ + { 134, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5670 */ + { 135, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5675 */ + { 136, (RF_A_BAND | RF_A_BAND_MB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5680 */ + { 137, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5685 */ + { 138, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5690 */ + { 139, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5695 */ + { 140, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5700 */ + { 141, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5705 */ + { 142, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5710 */ + { 143, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5715 */ + { 144, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5720 */ + { 145, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5725 */ + { 146, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5730 */ + { 147, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5735 */ + { 148, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5740 */ + { 149, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5745 */ + { 150, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3AAAA, 0x3 }, /* Freq 5750 */ + { 151, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x2F, 0, 0x0, 0x8, 0x3D555, 0x3 }, /* Freq 5755 */ + { 152, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x00000, 0x3 }, /* Freq 5760 */ + { 153, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x02AAA, 0x3 }, /* Freq 5765 */ + { 154, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 
0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x05555, 0x3 }, /* Freq 5770 */ + { 155, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x08000, 0x3 }, /* Freq 5775 */ + { 156, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0AAAA, 0x3 }, /* Freq 5780 */ + { 157, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x0D555, 0x3 }, /* Freq 5785 */ + { 158, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x10000, 0x3 }, /* Freq 5790 */ + { 159, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x12AAA, 0x3 }, /* Freq 5795 */ + { 160, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x15555, 0x3 }, /* Freq 5800 */ + { 161, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x18000, 0x3 }, /* Freq 5805 */ + { 162, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1AAAA, 0x3 }, /* Freq 5810 */ + { 163, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x1D555, 0x3 }, /* Freq 5815 */ + { 164, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x20000, 0x3 }, /* Freq 5820 */ + { 165, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x22AAA, 0x3 }, /* Freq 5825 */ + { 166, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x25555, 0x3 }, /* Freq 5830 */ + { 167, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x28000, 0x3 }, /* Freq 5835 */ + { 168, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2AAAA, 0x3 }, /* Freq 5840 */ + { 169, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x2D555, 0x3 }, /* Freq 5845 */ + { 170, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x30000, 0x3 }, /* Freq 5850 */ + { 171, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x32AAA, 0x3 }, /* Freq 5855 */ + { 172, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x35555, 0x3 }, /* Freq 5860 */ + { 173, (RF_A_BAND | RF_A_BAND_HB), 0x02, 0x3F, 0x7F, 0xDD, 0xC3, 0x40, 0x0, 0x80, 0x0, 0, 0, 0, 0x30, 0, 0x0, 0x8, 0x38000, 0x3 }, /* Freq 5865 */ }; static const u8 mt76x0_sdm_channel[] = { - 183, 185, 43, 45, 54, 55, 57, 58, 102, 103, 105, 106, 115, 117, 126, 127, 129, 130, 139, 141, 150, 151, 153, 154, 163, 165 + 183, 185, 43, 45, + 54, 55, 57, 58, + 102, 103, 105, 106, + 115, 117, 126, 127, + 129, 130, 139, 141, + 150, 151, 153, 154, + 163, 165 }; static const struct mt76x0_rf_switch_item mt76x0_rf_ext_pa_tab[] = { - { MT_RF(6, 45), RF_A_BAND_LB, 0x63}, - { MT_RF(6, 45), RF_A_BAND_MB, 0x43}, - { MT_RF(6, 45), RF_A_BAND_HB, 0x33}, - { MT_RF(6, 45), RF_A_BAND_11J, 0x73}, - - { MT_RF(6, 50), RF_A_BAND_LB, 0x02}, - { MT_RF(6, 50), RF_A_BAND_MB, 0x02}, - { 
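/*
 * A minimal sketch, not from the patch itself: the pllR* columns in the
 * frequency table above program a fractional-N synthesizer.  The
 * second-to-last field is the 18-bit fraction pll_sdm_k -- note how it
 * rolls over at 0x40000 and carries into the integer word pll_n (the
 * 0x28 -> 0x29 step between channels 10 and 11, for instance).  Assuming
 * 60 MHz per pll_n unit on the 2.4 GHz rows and 120 MHz on the 5 GHz
 * rows (inferred from the table values only, not stated by the patch),
 * the tuned frequency works out as below; the helper name is
 * illustrative, not part of the driver.
 */
static unsigned int freq_from_pll(unsigned int pll_n, unsigned int sdm_k,
				  bool a_band)
{
	unsigned int step = a_band ? 120 : 60;	/* MHz per pll_n unit (assumed) */
	unsigned long long frac = (unsigned long long)sdm_k * step;

	/* e.g. channel 36: (0x2b + 0x0aaaa / 0x40000) * 120 = 5180 MHz */
	return pll_n * step + (unsigned int)((frac + (1ull << 17)) >> 18);
}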
MT_RF(6, 50), RF_A_BAND_HB, 0x02}, - { MT_RF(6, 50), RF_A_BAND_11J, 0x02}, - - { MT_RF(6, 51), RF_A_BAND_LB, 0x02}, - { MT_RF(6, 51), RF_A_BAND_MB, 0x02}, - { MT_RF(6, 51), RF_A_BAND_HB, 0x02}, - { MT_RF(6, 51), RF_A_BAND_11J, 0x02}, - - { MT_RF(6, 52), RF_A_BAND_LB, 0x08}, - { MT_RF(6, 52), RF_A_BAND_MB, 0x08}, - { MT_RF(6, 52), RF_A_BAND_HB, 0x08}, - { MT_RF(6, 52), RF_A_BAND_11J, 0x08}, - - { MT_RF(6, 53), RF_A_BAND_LB, 0x08}, - { MT_RF(6, 53), RF_A_BAND_MB, 0x08}, - { MT_RF(6, 53), RF_A_BAND_HB, 0x08}, - { MT_RF(6, 53), RF_A_BAND_11J, 0x08}, - - { MT_RF(6, 54), RF_A_BAND_LB, 0x0A}, - { MT_RF(6, 54), RF_A_BAND_MB, 0x0A}, - { MT_RF(6, 54), RF_A_BAND_HB, 0x0A}, - { MT_RF(6, 54), RF_A_BAND_11J, 0x0A}, - - { MT_RF(6, 55), RF_A_BAND_LB, 0x0A}, - { MT_RF(6, 55), RF_A_BAND_MB, 0x0A}, - { MT_RF(6, 55), RF_A_BAND_HB, 0x0A}, - { MT_RF(6, 55), RF_A_BAND_11J, 0x0A}, - - { MT_RF(6, 56), RF_A_BAND_LB, 0x05}, - { MT_RF(6, 56), RF_A_BAND_MB, 0x05}, - { MT_RF(6, 56), RF_A_BAND_HB, 0x05}, - { MT_RF(6, 56), RF_A_BAND_11J, 0x05}, - - { MT_RF(6, 57), RF_A_BAND_LB, 0x05}, - { MT_RF(6, 57), RF_A_BAND_MB, 0x05}, - { MT_RF(6, 57), RF_A_BAND_HB, 0x05}, - { MT_RF(6, 57), RF_A_BAND_11J, 0x05}, - - { MT_RF(6, 58), RF_A_BAND_LB, 0x05}, - { MT_RF(6, 58), RF_A_BAND_MB, 0x03}, - { MT_RF(6, 58), RF_A_BAND_HB, 0x02}, - { MT_RF(6, 58), RF_A_BAND_11J, 0x07}, - - { MT_RF(6, 59), RF_A_BAND_LB, 0x05}, - { MT_RF(6, 59), RF_A_BAND_MB, 0x03}, - { MT_RF(6, 59), RF_A_BAND_HB, 0x02}, - { MT_RF(6, 59), RF_A_BAND_11J, 0x07}, + { MT_RF(6, 45), RF_A_BAND_LB, 0x63 }, + { MT_RF(6, 45), RF_A_BAND_MB, 0x43 }, + { MT_RF(6, 45), RF_A_BAND_HB, 0x33 }, + { MT_RF(6, 45), RF_A_BAND_11J, 0x73 }, + { MT_RF(6, 50), RF_A_BAND_LB, 0x02 }, + { MT_RF(6, 50), RF_A_BAND_MB, 0x02 }, + { MT_RF(6, 50), RF_A_BAND_HB, 0x02 }, + { MT_RF(6, 50), RF_A_BAND_11J, 0x02 }, + { MT_RF(6, 51), RF_A_BAND_LB, 0x02 }, + { MT_RF(6, 51), RF_A_BAND_MB, 0x02 }, + { MT_RF(6, 51), RF_A_BAND_HB, 0x02 }, + { MT_RF(6, 51), RF_A_BAND_11J, 0x02 }, + { MT_RF(6, 52), RF_A_BAND_LB, 0x08 }, + { MT_RF(6, 52), RF_A_BAND_MB, 0x08 }, + { MT_RF(6, 52), RF_A_BAND_HB, 0x08 }, + { MT_RF(6, 52), RF_A_BAND_11J, 0x08 }, + { MT_RF(6, 53), RF_A_BAND_LB, 0x08 }, + { MT_RF(6, 53), RF_A_BAND_MB, 0x08 }, + { MT_RF(6, 53), RF_A_BAND_HB, 0x08 }, + { MT_RF(6, 53), RF_A_BAND_11J, 0x08 }, + { MT_RF(6, 54), RF_A_BAND_LB, 0x0A }, + { MT_RF(6, 54), RF_A_BAND_MB, 0x0A }, + { MT_RF(6, 54), RF_A_BAND_HB, 0x0A }, + { MT_RF(6, 54), RF_A_BAND_11J, 0x0A }, + { MT_RF(6, 55), RF_A_BAND_LB, 0x0A }, + { MT_RF(6, 55), RF_A_BAND_MB, 0x0A }, + { MT_RF(6, 55), RF_A_BAND_HB, 0x0A }, + { MT_RF(6, 55), RF_A_BAND_11J, 0x0A }, + { MT_RF(6, 56), RF_A_BAND_LB, 0x05 }, + { MT_RF(6, 56), RF_A_BAND_MB, 0x05 }, + { MT_RF(6, 56), RF_A_BAND_HB, 0x05 }, + { MT_RF(6, 56), RF_A_BAND_11J, 0x05 }, + { MT_RF(6, 57), RF_A_BAND_LB, 0x05 }, + { MT_RF(6, 57), RF_A_BAND_MB, 0x05 }, + { MT_RF(6, 57), RF_A_BAND_HB, 0x05 }, + { MT_RF(6, 57), RF_A_BAND_11J, 0x05 }, + { MT_RF(6, 58), RF_A_BAND_LB, 0x05 }, + { MT_RF(6, 58), RF_A_BAND_MB, 0x03 }, + { MT_RF(6, 58), RF_A_BAND_HB, 0x02 }, + { MT_RF(6, 58), RF_A_BAND_11J, 0x07 }, + { MT_RF(6, 59), RF_A_BAND_LB, 0x05 }, + { MT_RF(6, 59), RF_A_BAND_MB, 0x03 }, + { MT_RF(6, 59), RF_A_BAND_HB, 0x02 }, + { MT_RF(6, 59), RF_A_BAND_11J, 0x07 }, }; #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c b/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c deleted file mode 100644 index 7a422c590211..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mac.c +++ /dev/null @@ -1,197 +0,0 @@ -/* - * 
Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * Copyright (C) 2018 Stanislaw Gruszka <stf_xl@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/etherdevice.h> - -#include "mt76x0.h" -#include "trace.h" - -void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot, - int ht_mode) -{ - int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; - bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); - u32 prot[6]; - bool ht_rts[4] = {}; - int i; - - prot[0] = MT_PROT_NAV_SHORT | - MT_PROT_TXOP_ALLOW_ALL | - MT_PROT_RTS_THR_EN; - prot[1] = prot[0]; - if (legacy_prot) - prot[1] |= MT_PROT_CTRL_CTS2SELF; - - prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20; - prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL; - - if (legacy_prot) { - prot[2] |= MT_PROT_RATE_CCK_11; - prot[3] |= MT_PROT_RATE_CCK_11; - prot[4] |= MT_PROT_RATE_CCK_11; - prot[5] |= MT_PROT_RATE_CCK_11; - } else { - prot[2] |= MT_PROT_RATE_OFDM_24; - prot[3] |= MT_PROT_RATE_DUP_OFDM_24; - prot[4] |= MT_PROT_RATE_OFDM_24; - prot[5] |= MT_PROT_RATE_DUP_OFDM_24; - } - - switch (mode) { - case IEEE80211_HT_OP_MODE_PROTECTION_NONE: - break; - - case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: - ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; - break; - - case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: - ht_rts[1] = ht_rts[3] = true; - break; - - case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: - ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; - break; - } - - if (non_gf) - ht_rts[2] = ht_rts[3] = true; - - for (i = 0; i < 4; i++) - if (ht_rts[i]) - prot[i + 2] |= MT_PROT_CTRL_RTS_CTS; - - for (i = 0; i < 6; i++) - mt76_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); -} - -void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb) -{ - if (short_preamb) - mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); - else - mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); -} - -void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval) -{ - u32 val = mt76_rr(dev, MT_BEACON_TIME_CFG); - - val &= ~(MT_BEACON_TIME_CFG_TIMER_EN | - MT_BEACON_TIME_CFG_SYNC_MODE | - MT_BEACON_TIME_CFG_TBTT_EN); - - if (!enable) { - mt76_wr(dev, MT_BEACON_TIME_CFG, val); - return; - } - - val &= ~MT_BEACON_TIME_CFG_INTVAL; - val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) | - MT_BEACON_TIME_CFG_TIMER_EN | - MT_BEACON_TIME_CFG_SYNC_MODE | - MT_BEACON_TIME_CFG_TBTT_EN; -} - -static void mt76x0_check_mac_err(struct mt76x02_dev *dev) -{ - u32 val = mt76_rr(dev, 0x10f4); - - if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5)))) - return; - - dev_err(dev->mt76.dev, "Error: MAC specific condition occurred\n"); - - mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); - udelay(10); - mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); -} -void mt76x0_mac_work(struct work_struct *work) -{ - struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, - mac_work.work); - struct { - u32 addr_base; - u32 span; - u64 *stat_base; - } spans[] = { - { MT_RX_STAT_0, 3, 
dev->stats.rx_stat }, - { MT_TX_STA_0, 3, dev->stats.tx_stat }, - { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat }, - { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del }, - { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] }, - { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] }, - }; - u32 sum, n; - int i, j, k; - - /* Note: using MCU_RANDOM_READ is actually slower then reading all the - * registers by hand. MCU takes ca. 20ms to complete read of 24 - * registers while reading them one by one will takes roughly - * 24*200us =~ 5ms. - */ - - k = 0; - n = 0; - sum = 0; - for (i = 0; i < ARRAY_SIZE(spans); i++) - for (j = 0; j < spans[i].span; j++) { - u32 val = mt76_rr(dev, spans[i].addr_base + j * 4); - - spans[i].stat_base[j * 2] += val & 0xffff; - spans[i].stat_base[j * 2 + 1] += val >> 16; - - /* Calculate average AMPDU length */ - if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 && - spans[i].addr_base != MT_TX_AGG_CNT_BASE1) - continue; - - n += (val >> 16) + (val & 0xffff); - sum += (val & 0xffff) * (1 + k * 2) + - (val >> 16) * (2 + k * 2); - k++; - } - - atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1); - - mt76x0_check_mac_err(dev); - - ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, 10 * HZ); -} - -void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev) -{ - struct ieee80211_sta *sta; - struct mt76_wcid *wcid; - void *msta; - u8 min_factor = 3; - int i; - - rcu_read_lock(); - for (i = 0; i < ARRAY_SIZE(dev->mt76.wcid); i++) { - wcid = rcu_dereference(dev->mt76.wcid[i]); - if (!wcid) - continue; - - msta = container_of(wcid, struct mt76x02_sta, wcid); - sta = container_of(msta, struct ieee80211_sta, drv_priv); - - min_factor = min(min_factor, sta->ht_cap.ampdu_factor); - } - rcu_read_unlock(); - - mt76_wr(dev, MT_MAX_LEN_CFG, 0xa0fff | - FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor)); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c index 9273d2d2764a..a803a9b6a4c5 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/main.c @@ -22,9 +22,23 @@ mt76x0_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) int ret; cancel_delayed_work_sync(&dev->cal_work); + if (mt76_is_mmio(dev)) { + tasklet_disable(&dev->pre_tbtt_tasklet); + tasklet_disable(&dev->dfs_pd.dfs_tasklet); + } mt76_set_channel(&dev->mt76); ret = mt76x0_phy_set_channel(dev, chandef); + + /* channel cycle counters read-and-clear */ + mt76_rr(dev, MT_CH_IDLE); + mt76_rr(dev, MT_CH_BUSY); + + if (mt76_is_mmio(dev)) { + mt76x02_dfs_init_params(dev); + tasklet_enable(&dev->pre_tbtt_tasklet); + tasklet_enable(&dev->dfs_pd.dfs_tasklet); + } mt76_txq_schedule_all(&dev->mt76); return ret; @@ -64,89 +78,3 @@ int mt76x0_config(struct ieee80211_hw *hw, u32 changed) return ret; } EXPORT_SYMBOL_GPL(mt76x0_config); - -static void -mt76x0_addr_wr(struct mt76x02_dev *dev, const u32 offset, const u8 *addr) -{ - mt76_wr(dev, offset, get_unaligned_le32(addr)); - mt76_wr(dev, offset + 4, addr[4] | addr[5] << 8); -} - -void mt76x0_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed) -{ - struct mt76x02_dev *dev = hw->priv; - - mutex_lock(&dev->mt76.mutex); - - if (changed & BSS_CHANGED_BSSID) { - mt76x0_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); - - /* Note: this is a hack because beacon_int is not changed - * on leave nor is any more appropriate event generated. - * rt2x00 doesn't seem to be bothered though. 
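/*
 * Not from the patch: in the mt76x0_set_channel() hunk above, the two
 * bare mt76_rr() calls work because -- per the patch's own comment --
 * the MT_CH_IDLE and MT_CH_BUSY channel cycle counters are
 * read-and-clear.  One discarded read right after the switch means the
 * next survey accounts only for time spent on the new channel.  The
 * idiom as a stand-alone sketch (the helper name is hypothetical):
 */
static void mt76x0_reset_cycle_counters(struct mt76x02_dev *dev)
{
	mt76_rr(dev, MT_CH_IDLE);	/* value intentionally discarded */
	mt76_rr(dev, MT_CH_BUSY);
}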
- */ - if (is_zero_ether_addr(info->bssid)) - mt76x0_mac_config_tsf(dev, false, 0); - } - - if (changed & BSS_CHANGED_BASIC_RATES) { - mt76_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates); - mt76_wr(dev, MT_VHT_HT_FBK_CFG0, 0x65432100); - mt76_wr(dev, MT_VHT_HT_FBK_CFG1, 0xedcba980); - mt76_wr(dev, MT_LG_FBK_CFG0, 0xedcba988); - mt76_wr(dev, MT_LG_FBK_CFG1, 0x00002100); - } - - if (changed & BSS_CHANGED_BEACON_INT) - mt76x0_mac_config_tsf(dev, true, info->beacon_int); - - if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) - mt76x0_mac_set_protection(dev, info->use_cts_prot, - info->ht_operation_mode); - - if (changed & BSS_CHANGED_ERP_PREAMBLE) - mt76x0_mac_set_short_preamble(dev, info->use_short_preamble); - - if (changed & BSS_CHANGED_ERP_SLOT) { - int slottime = info->use_short_slot ? 9 : 20; - - mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, - MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); - } - - if (changed & BSS_CHANGED_ASSOC) - mt76x0_phy_recalibrate_after_assoc(dev); - - mutex_unlock(&dev->mt76.mutex); -} -EXPORT_SYMBOL_GPL(mt76x0_bss_info_changed); - -void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac_addr) -{ - struct mt76x02_dev *dev = hw->priv; - - set_bit(MT76_SCANNING, &dev->mt76.state); -} -EXPORT_SYMBOL_GPL(mt76x0_sw_scan); - -void mt76x0_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif) -{ - struct mt76x02_dev *dev = hw->priv; - - clear_bit(MT76_SCANNING, &dev->mt76.state); -} -EXPORT_SYMBOL_GPL(mt76x0_sw_scan_complete); - -int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value) -{ - struct mt76x02_dev *dev = hw->priv; - - mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); - - return 0; -} -EXPORT_SYMBOL_GPL(mt76x0_set_rts_threshold); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h index 2187bafaf2e9..46629f61673b 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/mt76x0.h @@ -28,18 +28,26 @@ #include "../mt76x02.h" #include "eeprom.h" -#define MT_CALIBRATE_INTERVAL (4 * HZ) +#define MT7610E_FIRMWARE "mediatek/mt7610e.bin" +#define MT7650E_FIRMWARE "mediatek/mt7650e.bin" + +#define MT7610U_FIRMWARE "mediatek/mt7610u.bin" #define MT_USB_AGGR_SIZE_LIMIT 21 /* * 1024B */ #define MT_USB_AGGR_TIMEOUT 0x80 /* * 33ns */ static inline bool is_mt7610e(struct mt76x02_dev *dev) { - /* TODO */ - return false; + if (!mt76_is_mmio(dev)) + return false; + + return mt76_chip(&dev->mt76) == 0x7610; } -void mt76x0_init_debugfs(struct mt76x02_dev *dev); +static inline bool is_mt7630(struct mt76x02_dev *dev) +{ + return mt76_chip(&dev->mt76) == 0x7630; +} /* Init */ struct mt76x02_dev * @@ -54,30 +62,12 @@ int mt76x0_mac_start(struct mt76x02_dev *dev); void mt76x0_mac_stop(struct mt76x02_dev *dev); int mt76x0_config(struct ieee80211_hw *hw, u32 changed); -void mt76x0_bss_info_changed(struct ieee80211_hw *hw, - struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed); -void mt76x0_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac_addr); -void mt76x0_sw_scan_complete(struct ieee80211_hw *hw, - struct ieee80211_vif *vif); -int mt76x0_set_rts_threshold(struct ieee80211_hw *hw, u32 value); /* PHY */ void mt76x0_phy_init(struct mt76x02_dev *dev); -int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev); +int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev); int mt76x0_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef); 
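/*
 * Not from the patch: a hypothetical usage sketch of the mt76x0.h
 * additions above.  With chip detection reduced to a mt76_chip()
 * comparison and the firmware names exported from this header, picking
 * the PCIe firmware image can be a one-liner; the helper name and the
 * exact shape of the real loader in pci_mcu.c are assumptions.
 */
static const char *mt76x0e_fw_name(struct mt76x02_dev *dev)
{
	return is_mt7610e(dev) ? MT7610E_FIRMWARE : MT7650E_FIRMWARE;
}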
-void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev); void mt76x0_phy_set_txpower(struct mt76x02_dev *dev); void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on); - -/* MAC */ -void mt76x0_mac_work(struct work_struct *work); -void mt76x0_mac_set_protection(struct mt76x02_dev *dev, bool legacy_prot, - int ht_mode); -void mt76x0_mac_set_short_preamble(struct mt76x02_dev *dev, bool short_preamb); -void mt76x0_mac_config_tsf(struct mt76x02_dev *dev, bool enable, int interval); -void mt76x0_mac_set_ampdu_factor(struct mt76x02_dev *dev); - #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c index 522c86059bcb..d895b6f3dc44 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci.c @@ -68,6 +68,19 @@ static void mt76x0e_stop(struct ieee80211_hw *hw) mutex_unlock(&dev->mt76.mutex); } +static void +mt76x0e_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + u32 queues, bool drop) +{ +} + +static int +mt76x0e_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, + bool set) +{ + return 0; +} + static const struct ieee80211_ops mt76x0e_ops = { .tx = mt76x02_tx, .start = mt76x0e_start, @@ -76,15 +89,22 @@ static const struct ieee80211_ops mt76x0e_ops = { .remove_interface = mt76x02_remove_interface, .config = mt76x0_config, .configure_filter = mt76x02_configure_filter, - .sta_add = mt76x02_sta_add, - .sta_remove = mt76x02_sta_remove, + .bss_info_changed = mt76x02_bss_info_changed, + .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x0_sw_scan, - .sw_scan_complete = mt76x0_sw_scan_complete, + .sw_scan_start = mt76x02_sw_scan, + .sw_scan_complete = mt76x02_sw_scan_complete, .ampdu_action = mt76x02_ampdu_action, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .wake_tx_queue = mt76_wake_tx_queue, + .get_survey = mt76_get_survey, + .get_txpower = mt76x02_get_txpower, + .flush = mt76x0e_flush, + .set_tim = mt76x0e_set_tim, + .release_buffered_frames = mt76_release_buffered_frames, + .set_coverage_class = mt76x02_set_coverage_class, + .set_rts_threshold = mt76x02_set_rts_threshold, }; static int mt76x0e_register_device(struct mt76x02_dev *dev) @@ -135,10 +155,14 @@ mt76x0e_probe(struct pci_dev *pdev, const struct pci_device_id *id) { static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), + .update_survey = mt76x02_update_channel, .tx_prepare_skb = mt76x02_tx_prepare_skb, .tx_complete_skb = mt76x02_tx_complete_skb, .rx_skb = mt76x02_queue_rx_skb, .rx_poll_complete = mt76x02_rx_poll_complete, + .sta_ps = mt76x02_sta_ps, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, }; struct mt76x02_dev *dev; int ret; @@ -185,6 +209,7 @@ error: static void mt76x0e_cleanup(struct mt76x02_dev *dev) { clear_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); + tasklet_disable(&dev->pre_tbtt_tasklet); mt76x0_chip_onoff(dev, false, false); mt76x0e_stop_hw(dev); mt76x02_dma_cleanup(dev); @@ -209,6 +234,8 @@ static const struct pci_device_id mt76x0e_device_table[] = { }; MODULE_DEVICE_TABLE(pci, mt76x0e_device_table); +MODULE_FIRMWARE(MT7610E_FIRMWARE); +MODULE_FIRMWARE(MT7650E_FIRMWARE); MODULE_LICENSE("Dual BSD/GPL"); static struct pci_driver mt76x0e_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c index 569861289aa5..490c1869f2c4 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/pci_mcu.c @@ -19,9 +19,6 @@ #include "mt76x0.h" #include "mcu.h" -#define MT7610E_FIRMWARE "mediatek/mt7610e.bin" -#define MT7650E_FIRMWARE "mediatek/mt7650e.bin" - #define MT_MCU_IVB_ADDR (MT_MCU_ILM_ADDR + 0x54000 - MT_MCU_IVB_SIZE) static int mt76x0e_load_firmware(struct mt76x02_dev *dev) @@ -130,7 +127,6 @@ out: int mt76x0e_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x0e_mcu_ops = { - .mcu_msg_alloc = mt76x02_mcu_msg_alloc, .mcu_send_msg = mt76x02_mcu_msg_send, }; int err; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c index cf024950e0ed..1eb1a802ed20 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.c @@ -20,7 +20,6 @@ #include "mt76x0.h" #include "mcu.h" #include "eeprom.h" -#include "trace.h" #include "phy.h" #include "initvals.h" #include "initvals_phy.h" @@ -49,12 +48,12 @@ mt76x0_rf_csr_wr(struct mt76x02_dev *dev, u32 offset, u8 value) } mt76_wr(dev, MT_RF_CSR_CFG, - FIELD_PREP(MT_RF_CSR_CFG_DATA, value) | - FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | - FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | - MT_RF_CSR_CFG_WR | - MT_RF_CSR_CFG_KICK); - trace_mt76x0_rf_write(&dev->mt76, bank, offset, value); + FIELD_PREP(MT_RF_CSR_CFG_DATA, value) | + FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | + FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | + MT_RF_CSR_CFG_WR | + MT_RF_CSR_CFG_KICK); + out: mutex_unlock(&dev->phy_mutex); @@ -86,19 +85,18 @@ static int mt76x0_rf_csr_rr(struct mt76x02_dev *dev, u32 offset) goto out; mt76_wr(dev, MT_RF_CSR_CFG, - FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | - FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | - MT_RF_CSR_CFG_KICK); + FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | + FIELD_PREP(MT_RF_CSR_CFG_REG_ID, reg) | + MT_RF_CSR_CFG_KICK); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) goto out; val = mt76_rr(dev, MT_RF_CSR_CFG); if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == reg && - FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) { + FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val); - trace_mt76x0_rf_read(&dev->mt76, bank, offset, ret); - } + out: mutex_unlock(&dev->phy_mutex); @@ -110,7 +108,7 @@ out: } static int -rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) +mt76x0_rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) { if (mt76_is_usb(dev)) { struct mt76_reg_pair pair = { @@ -126,8 +124,7 @@ rf_wr(struct mt76x02_dev *dev, u32 offset, u8 val) } } -static int -rf_rr(struct mt76x02_dev *dev, u32 offset) +static int mt76x0_rf_rr(struct mt76x02_dev *dev, u32 offset) { int ret; u32 val; @@ -149,38 +146,36 @@ rf_rr(struct mt76x02_dev *dev, u32 offset) } static int -rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val) +mt76x0_rf_rmw(struct mt76x02_dev *dev, u32 offset, u8 mask, u8 val) { int ret; - ret = rf_rr(dev, offset); + ret = mt76x0_rf_rr(dev, offset); if (ret < 0) return ret; + val |= ret & ~mask; - ret = rf_wr(dev, offset, val); - if (ret) - return ret; - return val; + ret = mt76x0_rf_wr(dev, offset, val); + return ret ? 
ret : val; } static int -rf_set(struct mt76x02_dev *dev, u32 offset, u8 val) +mt76x0_rf_set(struct mt76x02_dev *dev, u32 offset, u8 val) { - return rf_rmw(dev, offset, 0, val); + return mt76x0_rf_rmw(dev, offset, 0, val); } -#if 0 static int -rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask) +mt76x0_rf_clear(struct mt76x02_dev *dev, u32 offset, u8 mask) { - return rf_rmw(dev, offset, mask, 0); + return mt76x0_rf_rmw(dev, offset, mask, 0); } -#endif static void -mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data, - int n) +mt76x0_phy_rf_csr_wr_rp(struct mt76x02_dev *dev, + const struct mt76_reg_pair *data, + int n) { while (n-- > 0) { mt76x0_rf_csr_wr(dev, data->reg, data->value); @@ -190,12 +185,12 @@ mt76x0_rf_csr_wr_rp(struct mt76x02_dev *dev, const struct mt76_reg_pair *data, #define RF_RANDOM_WRITE(dev, tab) do { \ if (mt76_is_mmio(dev)) \ - mt76x0_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \ + mt76x0_phy_rf_csr_wr_rp(dev, tab, ARRAY_SIZE(tab)); \ else \ mt76_wr_rp(dev, MT_MCU_MEMMAP_RF, tab, ARRAY_SIZE(tab));\ } while (0) -int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) +int mt76x0_phy_wait_bbp_ready(struct mt76x02_dev *dev) { int i = 20; u32 val; @@ -215,62 +210,6 @@ int mt76x0_wait_bbp_ready(struct mt76x02_dev *dev) return 0; } -static void mt76x0_vco_cal(struct mt76x02_dev *dev, u8 channel) -{ - u8 val; - - val = rf_rr(dev, MT_RF(0, 4)); - if ((val & 0x70) != 0x30) - return; - - /* - * Calibration Mode - Open loop, closed loop, and amplitude: - * B0.R06.[0]: 1 - * B0.R06.[3:1] bp_close_code: 100 - * B0.R05.[7:0] bp_open_code: 0x0 - * B0.R04.[2:0] cal_bits: 000 - * B0.R03.[2:0] startup_time: 011 - * B0.R03.[6:4] settle_time: - * 80MHz channel: 110 - * 40MHz channel: 101 - * 20MHz channel: 100 - */ - val = rf_rr(dev, MT_RF(0, 6)); - val &= ~0xf; - val |= 0x09; - rf_wr(dev, MT_RF(0, 6), val); - - val = rf_rr(dev, MT_RF(0, 5)); - if (val != 0) - rf_wr(dev, MT_RF(0, 5), 0x0); - - val = rf_rr(dev, MT_RF(0, 4)); - val &= ~0x07; - rf_wr(dev, MT_RF(0, 4), val); - - val = rf_rr(dev, MT_RF(0, 3)); - val &= ~0x77; - if (channel == 1 || channel == 7 || channel == 9 || channel >= 13) { - val |= 0x63; - } else if (channel == 3 || channel == 4 || channel == 10) { - val |= 0x53; - } else if (channel == 2 || channel == 5 || channel == 6 || - channel == 8 || channel == 11 || channel == 12) { - val |= 0x43; - } else { - WARN(1, "Unknown channel %u\n", channel); - return; - } - rf_wr(dev, MT_RF(0, 3), val); - - /* TODO replace by mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); */ - val = rf_rr(dev, MT_RF(0, 4)); - val = ((val & ~(0x80)) | 0x80); - rf_wr(dev, MT_RF(0, 4), val); - - msleep(2); -} - static void mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) { @@ -278,8 +217,8 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) case NL80211_BAND_2GHZ: RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab); - rf_wr(dev, MT_RF(5, 0), 0x45); - rf_wr(dev, MT_RF(6, 0), 0x44); + mt76x0_rf_wr(dev, MT_RF(5, 0), 0x45); + mt76x0_rf_wr(dev, MT_RF(6, 0), 0x44); mt76_wr(dev, MT_TX_ALC_VGA3, 0x00050007); mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x003E0002); @@ -287,8 +226,8 @@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) case NL80211_BAND_5GHZ: RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab); - rf_wr(dev, MT_RF(5, 0), 0x44); - rf_wr(dev, MT_RF(6, 0), 0x45); + mt76x0_rf_wr(dev, MT_RF(5, 0), 0x44); + mt76x0_rf_wr(dev, MT_RF(6, 0), 0x45); mt76_wr(dev, MT_TX_ALC_VGA3, 0x00000005); mt76_wr(dev, MT_TX0_RF_GAIN_CORR, 0x01010102); @@ -301,18 +240,17 
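/*
 * Not from the patch: the renamed helper above keeps rf_rmw()'s
 * contract -- on success it returns the value actually written,
 * new = (old & ~mask) | val -- and mt76x0_rf_set()/mt76x0_rf_clear()
 * are its two degenerate cases (mask = 0 and val = 0, respectively).
 * The open-coded read/mask/or/write sequences in the channel-programming
 * hunks below all collapse onto it, e.g. (wrapper name hypothetical):
 */
static int mt76x0_rf_set_pll_den(struct mt76x02_dev *dev, u8 den)
{
	/* was: rf_val = rf_rr(...); rf_val &= ~0x1F; rf_val |= den; rf_wr(...) */
	return mt76x0_rf_rmw(dev, MT_RF(0, 32), MT_RF_PLL_DEN_MASK, den);
}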
@@ mt76x0_phy_set_band(struct mt76x02_dev *dev, enum nl80211_band band) static void mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_band) { + const struct mt76x0_freq_item *freq_item; u16 rf_band = rf_bw_band & 0xff00; u16 rf_bw = rf_bw_band & 0x00ff; enum nl80211_band band; + bool b_sdm = false; u32 mac_reg; - u8 rf_val; int i; - bool bSDM = false; - const struct mt76x0_freq_item *freq_item; for (i = 0; i < ARRAY_SIZE(mt76x0_sdm_channel); i++) { if (channel == mt76x0_sdm_channel[i]) { - bSDM = true; + b_sdm = true; break; } } @@ -321,108 +259,84 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban if (channel == mt76x0_frequency_plan[i].channel) { rf_band = mt76x0_frequency_plan[i].band; - if (bSDM) + if (b_sdm) freq_item = &(mt76x0_sdm_frequency_plan[i]); else freq_item = &(mt76x0_frequency_plan[i]); - rf_wr(dev, MT_RF(0, 37), freq_item->pllR37); - rf_wr(dev, MT_RF(0, 36), freq_item->pllR36); - rf_wr(dev, MT_RF(0, 35), freq_item->pllR35); - rf_wr(dev, MT_RF(0, 34), freq_item->pllR34); - rf_wr(dev, MT_RF(0, 33), freq_item->pllR33); + mt76x0_rf_wr(dev, MT_RF(0, 37), freq_item->pllR37); + mt76x0_rf_wr(dev, MT_RF(0, 36), freq_item->pllR36); + mt76x0_rf_wr(dev, MT_RF(0, 35), freq_item->pllR35); + mt76x0_rf_wr(dev, MT_RF(0, 34), freq_item->pllR34); + mt76x0_rf_wr(dev, MT_RF(0, 33), freq_item->pllR33); - rf_val = rf_rr(dev, MT_RF(0, 32)); - rf_val &= ~0xE0; - rf_val |= freq_item->pllR32_b7b5; - rf_wr(dev, MT_RF(0, 32), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 32), 0xe0, + freq_item->pllR32_b7b5); /* R32<4:0> pll_den: (Denomina - 8) */ - rf_val = rf_rr(dev, MT_RF(0, 32)); - rf_val &= ~0x1F; - rf_val |= freq_item->pllR32_b4b0; - rf_wr(dev, MT_RF(0, 32), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 32), MT_RF_PLL_DEN_MASK, + freq_item->pllR32_b4b0); /* R31<7:5> */ - rf_val = rf_rr(dev, MT_RF(0, 31)); - rf_val &= ~0xE0; - rf_val |= freq_item->pllR31_b7b5; - rf_wr(dev, MT_RF(0, 31), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 31), 0xe0, + freq_item->pllR31_b7b5); /* R31<4:0> pll_k(Nominator) */ - rf_val = rf_rr(dev, MT_RF(0, 31)); - rf_val &= ~0x1F; - rf_val |= freq_item->pllR31_b4b0; - rf_wr(dev, MT_RF(0, 31), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 31), MT_RF_PLL_K_MASK, + freq_item->pllR31_b4b0); /* R30<7> sdm_reset_n */ - rf_val = rf_rr(dev, MT_RF(0, 30)); - rf_val &= ~0x80; - if (bSDM) { - rf_wr(dev, MT_RF(0, 30), rf_val); - rf_val |= 0x80; - rf_wr(dev, MT_RF(0, 30), rf_val); + if (b_sdm) { + mt76x0_rf_clear(dev, MT_RF(0, 30), + MT_RF_SDM_RESET_MASK); + mt76x0_rf_set(dev, MT_RF(0, 30), + MT_RF_SDM_RESET_MASK); } else { - rf_val |= freq_item->pllR30_b7; - rf_wr(dev, MT_RF(0, 30), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 30), + MT_RF_SDM_RESET_MASK, + freq_item->pllR30_b7); } /* R30<6:2> sdmmash_prbs,sin */ - rf_val = rf_rr(dev, MT_RF(0, 30)); - rf_val &= ~0x7C; - rf_val |= freq_item->pllR30_b6b2; - rf_wr(dev, MT_RF(0, 30), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 30), + MT_RF_SDM_MASH_PRBS_MASK, + freq_item->pllR30_b6b2); /* R30<1> sdm_bp */ - rf_val = rf_rr(dev, MT_RF(0, 30)); - rf_val &= ~0x02; - rf_val |= (freq_item->pllR30_b1 << 1); - rf_wr(dev, MT_RF(0, 30), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 30), MT_RF_SDM_BP_MASK, + freq_item->pllR30_b1 << 1); /* R30<0> R29<7:0> (hex) pll_n */ - rf_val = freq_item->pll_n & 0x00FF; - rf_wr(dev, MT_RF(0, 29), rf_val); + mt76x0_rf_wr(dev, MT_RF(0, 29), + freq_item->pll_n & 0xff); - rf_val = rf_rr(dev, MT_RF(0, 30)); - rf_val &= ~0x1; - rf_val |= ((freq_item->pll_n >> 8) & 0x0001); - rf_wr(dev, MT_RF(0, 30), 
rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 30), 0x1, + (freq_item->pll_n >> 8) & 0x1); /* R28<7:6> isi_iso */ - rf_val = rf_rr(dev, MT_RF(0, 28)); - rf_val &= ~0xC0; - rf_val |= freq_item->pllR28_b7b6; - rf_wr(dev, MT_RF(0, 28), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_ISI_ISO_MASK, + freq_item->pllR28_b7b6); /* R28<5:4> pfd_dly */ - rf_val = rf_rr(dev, MT_RF(0, 28)); - rf_val &= ~0x30; - rf_val |= freq_item->pllR28_b5b4; - rf_wr(dev, MT_RF(0, 28), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_PFD_DLY_MASK, + freq_item->pllR28_b5b4); /* R28<3:2> clksel option */ - rf_val = rf_rr(dev, MT_RF(0, 28)); - rf_val &= ~0x0C; - rf_val |= freq_item->pllR28_b3b2; - rf_wr(dev, MT_RF(0, 28), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 28), MT_RF_CLK_SEL_MASK, + freq_item->pllR28_b3b2); /* R28<1:0> R27<7:0> R26<7:0> (hex) sdm_k */ - rf_val = freq_item->pll_sdm_k & 0x000000FF; - rf_wr(dev, MT_RF(0, 26), rf_val); - - rf_val = ((freq_item->pll_sdm_k >> 8) & 0x000000FF); - rf_wr(dev, MT_RF(0, 27), rf_val); + mt76x0_rf_wr(dev, MT_RF(0, 26), + freq_item->pll_sdm_k & 0xff); + mt76x0_rf_wr(dev, MT_RF(0, 27), + (freq_item->pll_sdm_k >> 8) & 0xff); - rf_val = rf_rr(dev, MT_RF(0, 28)); - rf_val &= ~0x3; - rf_val |= ((freq_item->pll_sdm_k >> 16) & 0x0003); - rf_wr(dev, MT_RF(0, 28), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 28), 0x3, + (freq_item->pll_sdm_k >> 16) & 0x3); /* R24<1:0> xo_div */ - rf_val = rf_rr(dev, MT_RF(0, 24)); - rf_val &= ~0x3; - rf_val |= freq_item->pllR24_b1b0; - rf_wr(dev, MT_RF(0, 24), rf_val); + mt76x0_rf_rmw(dev, MT_RF(0, 24), MT_RF_XO_DIV_MASK, + freq_item->pllR24_b1b0); break; } @@ -430,25 +344,26 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban for (i = 0; i < ARRAY_SIZE(mt76x0_rf_bw_switch_tab); i++) { if (rf_bw == mt76x0_rf_bw_switch_tab[i].bw_band) { - rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg, - mt76x0_rf_bw_switch_tab[i].value); + mt76x0_rf_wr(dev, + mt76x0_rf_bw_switch_tab[i].rf_bank_reg, + mt76x0_rf_bw_switch_tab[i].value); } else if ((rf_bw == (mt76x0_rf_bw_switch_tab[i].bw_band & 0xFF)) && (rf_band & mt76x0_rf_bw_switch_tab[i].bw_band)) { - rf_wr(dev, mt76x0_rf_bw_switch_tab[i].rf_bank_reg, - mt76x0_rf_bw_switch_tab[i].value); + mt76x0_rf_wr(dev, + mt76x0_rf_bw_switch_tab[i].rf_bank_reg, + mt76x0_rf_bw_switch_tab[i].value); } } for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) { if (mt76x0_rf_band_switch_tab[i].bw_band & rf_band) { - rf_wr(dev, mt76x0_rf_band_switch_tab[i].rf_bank_reg, - mt76x0_rf_band_switch_tab[i].value); + mt76x0_rf_wr(dev, + mt76x0_rf_band_switch_tab[i].rf_bank_reg, + mt76x0_rf_band_switch_tab[i].value); } } - mac_reg = mt76_rr(dev, MT_RF_MISC); - mac_reg &= ~0xC; /* Clear 0x518[3:2] */ - mt76_wr(dev, MT_RF_MISC, mac_reg); + mt76_clear(dev, MT_RF_MISC, 0xc); band = (rf_band & RF_G_BAND) ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; if (mt76x02_ext_pa_enabled(dev, band)) { @@ -457,21 +372,17 @@ mt76x0_phy_set_chan_rf_params(struct mt76x02_dev *dev, u8 channel, u16 rf_bw_ban [2]1'b1: enable external A band PA, 1'b0: disable external A band PA [3]1'b1: enable external G band PA, 1'b0: disable external G band PA */ - if (rf_band & RF_A_BAND) { - mac_reg = mt76_rr(dev, MT_RF_MISC); - mac_reg |= 0x4; - mt76_wr(dev, MT_RF_MISC, mac_reg); - } else { - mac_reg = mt76_rr(dev, MT_RF_MISC); - mac_reg |= 0x8; - mt76_wr(dev, MT_RF_MISC, mac_reg); - } + if (rf_band & RF_A_BAND) + mt76_set(dev, MT_RF_MISC, BIT(2)); + else + mt76_set(dev, MT_RF_MISC, BIT(3)); /* External PA */ for (i = 0; i < ARRAY_SIZE(mt76x0_rf_ext_pa_tab); i++) if (mt76x0_rf_ext_pa_tab[i].bw_band & rf_band) - rf_wr(dev, mt76x0_rf_ext_pa_tab[i].rf_bank_reg, - mt76x0_rf_ext_pa_tab[i].value); + mt76x0_rf_wr(dev, + mt76x0_rf_ext_pa_tab[i].rf_bank_reg, + mt76x0_rf_ext_pa_tab[i].value); } if (rf_band & RF_G_BAND) { @@ -516,27 +427,53 @@ mt76x0_phy_set_chan_bbp_params(struct mt76x02_dev *dev, u16 rf_bw_band) } } -static void mt76x0_ant_select(struct mt76x02_dev *dev) +static void mt76x0_phy_ant_select(struct mt76x02_dev *dev) { - struct ieee80211_channel *chan = dev->mt76.chandef.chan; - - /* single antenna mode */ - if (chan->band == NL80211_BAND_2GHZ) { - mt76_rmw(dev, MT_COEXCFG3, - BIT(5) | BIT(4) | BIT(3) | BIT(2), BIT(1)); - mt76_rmw(dev, MT_WLAN_FUN_CTRL, BIT(5), BIT(6)); + u16 ee_ant = mt76x02_eeprom_get(dev, MT_EE_ANTENNA); + u16 nic_conf2 = mt76x02_eeprom_get(dev, MT_EE_NIC_CONF_2); + u32 wlan, coex3, cmb; + bool ant_div; + + wlan = mt76_rr(dev, MT_WLAN_FUN_CTRL); + cmb = mt76_rr(dev, MT_CMB_CTRL); + coex3 = mt76_rr(dev, MT_COEXCFG3); + + cmb &= ~(BIT(14) | BIT(12)); + wlan &= ~(BIT(6) | BIT(5)); + coex3 &= ~GENMASK(5, 2); + + if (ee_ant & MT_EE_ANTENNA_DUAL) { + /* dual antenna mode */ + ant_div = !(nic_conf2 & MT_EE_NIC_CONF_2_ANT_OPT) && + (nic_conf2 & MT_EE_NIC_CONF_2_ANT_DIV); + if (ant_div) + cmb |= BIT(12); + else + coex3 |= BIT(4); + coex3 |= BIT(3); + if (dev->mt76.cap.has_2ghz) + wlan |= BIT(6); } else { - mt76_rmw(dev, MT_COEXCFG3, BIT(5) | BIT(2), - BIT(4) | BIT(3)); - mt76_clear(dev, MT_WLAN_FUN_CTRL, - BIT(6) | BIT(5)); + /* single antenna mode */ + if (dev->mt76.cap.has_5ghz) { + coex3 |= BIT(3) | BIT(4); + } else { + wlan |= BIT(6); + coex3 |= BIT(1); + } } - mt76_clear(dev, MT_CMB_CTRL, BIT(14) | BIT(12)); + + if (is_mt7630(dev)) + cmb |= BIT(14) | BIT(11); + + mt76_wr(dev, MT_WLAN_FUN_CTRL, wlan); + mt76_wr(dev, MT_CMB_CTRL, cmb); mt76_clear(dev, MT_COEXCFG0, BIT(2)); + mt76_wr(dev, MT_COEXCFG3, coex3); } static void -mt76x0_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width) +mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width) { enum { BW_20 = 0, BW_40 = 1, BW_80 = 2, BW_10 = 4}; int bw; @@ -563,7 +500,346 @@ mt76x0_phy_bbp_set_bw(struct mt76x02_dev *dev, enum nl80211_chan_width width) return ; } - mt76x02_mcu_function_select(dev, BW_SETTING, bw, false); + mt76x02_mcu_function_select(dev, BW_SETTING, bw); +} + +static void mt76x0_phy_tssi_dc_calibrate(struct mt76x02_dev *dev) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u32 val; + + if (chan->band == NL80211_BAND_5GHZ) + mt76x0_rf_clear(dev, MT_RF(0, 67), 0xf); + + /* bypass ADDA control */ + mt76_wr(dev, MT_RF_SETTING_0, 0x60002237); + mt76_wr(dev, MT_RF_BYPASS_0, 0xffffffff); + + /* bbp sw reset */ + mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); + usleep_range(500, 1000); + mt76_clear(dev, MT_BBP(CORE,
4), BIT(0)); + + val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050; + mt76_wr(dev, MT_BBP(CORE, 34), val); + + /* enable TX with DAC0 input */ + mt76_wr(dev, MT_BBP(TXBE, 6), BIT(31)); + + mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200); + dev->cal.tssi_dc = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; + + /* stop bypass ADDA */ + mt76_wr(dev, MT_RF_BYPASS_0, 0); + /* stop TX */ + mt76_wr(dev, MT_BBP(TXBE, 6), 0); + /* bbp sw reset */ + mt76_set(dev, MT_BBP(CORE, 4), BIT(0)); + usleep_range(500, 1000); + mt76_clear(dev, MT_BBP(CORE, 4), BIT(0)); + + if (chan->band == NL80211_BAND_5GHZ) + mt76x0_rf_rmw(dev, MT_RF(0, 67), 0xf, 0x4); +} + +static int +mt76x0_phy_tssi_adc_calibrate(struct mt76x02_dev *dev, s16 *ltssi, + u8 *info) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + u32 val; + + val = (chan->band == NL80211_BAND_5GHZ) ? 0x80055 : 0x80050; + mt76_wr(dev, MT_BBP(CORE, 34), val); + + if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) { + mt76_clear(dev, MT_BBP(CORE, 34), BIT(4)); + return -ETIMEDOUT; + } + + *ltssi = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; + if (chan->band == NL80211_BAND_5GHZ) + *ltssi += 128; + + /* set packet info#1 mode */ + mt76_wr(dev, MT_BBP(CORE, 34), 0x80041); + info[0] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; + + /* set packet info#2 mode */ + mt76_wr(dev, MT_BBP(CORE, 34), 0x80042); + info[1] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; + + /* set packet info#3 mode */ + mt76_wr(dev, MT_BBP(CORE, 34), 0x80043); + info[2] = mt76_rr(dev, MT_BBP(CORE, 35)) & 0xff; + + return 0; +} + +static u8 mt76x0_phy_get_rf_pa_mode(struct mt76x02_dev *dev, + int index, u8 tx_rate) +{ + u32 val, reg; + + reg = (index == 1) ? MT_RF_PA_MODE_CFG1 : MT_RF_PA_MODE_CFG0; + val = mt76_rr(dev, reg); + return (val & (3 << (tx_rate * 2))) >> (tx_rate * 2); +} + +static int +mt76x0_phy_get_target_power(struct mt76x02_dev *dev, u8 tx_mode, + u8 *info, s8 *target_power, + s8 *target_pa_power) +{ + u8 tx_rate, cur_power; + + cur_power = mt76_rr(dev, MT_TX_ALC_CFG_0) & MT_TX_ALC_CFG_0_CH_INIT_0; + switch (tx_mode) { + case 0: + /* cck rates */ + tx_rate = (info[0] & 0x60) >> 5; + if (tx_rate > 3) + return -EINVAL; + + *target_power = cur_power + dev->mt76.rate_power.cck[tx_rate]; + *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, tx_rate); + break; + case 1: { + u8 index; + + /* ofdm rates */ + tx_rate = (info[0] & 0xf0) >> 4; + switch (tx_rate) { + case 0xb: + index = 0; + break; + case 0xf: + index = 1; + break; + case 0xa: + index = 2; + break; + case 0xe: + index = 3; + break; + case 0x9: + index = 4; + break; + case 0xd: + index = 5; + break; + case 0x8: + index = 6; + break; + case 0xc: + index = 7; + break; + default: + return -EINVAL; + } + + *target_power = cur_power + dev->mt76.rate_power.ofdm[index]; + *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 0, index + 4); + break; + } + case 4: + /* vht rates */ + tx_rate = info[1] & 0xf; + if (tx_rate > 9) + return -EINVAL; + + *target_power = cur_power + dev->mt76.rate_power.vht[tx_rate]; + *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate); + break; + default: + /* ht rates */ + tx_rate = info[1] & 0x7f; + if (tx_rate > 9) + return -EINVAL; + + *target_power = cur_power + dev->mt76.rate_power.ht[tx_rate]; + *target_pa_power = mt76x0_phy_get_rf_pa_mode(dev, 1, tx_rate); + break; + } + + return 0; +} + +static s16 mt76x0_phy_lin2db(u16 val) +{ + u32 mantissa = val << 4; + int ret, data; + s16 exp = -4; + + while (mantissa < BIT(15)) { + mantissa <<= 1; + if (--exp < -20) + return -10000; + 
} + while (mantissa > 0xffff) { + mantissa >>= 1; + if (++exp > 20) + return -10000; + } + + /* s(15,0) */ + if (mantissa <= 47104) + data = mantissa + (mantissa >> 3) + (mantissa >> 4) - 38400; + else + data = mantissa - (mantissa >> 3) - (mantissa >> 6) - 23040; + data = max_t(int, 0, data); + + ret = ((15 + exp) << 15) + data; + ret = (ret << 2) + (ret << 1) + (ret >> 6) + (ret >> 7); + return ret >> 10; +} + +static int +mt76x0_phy_get_delta_power(struct mt76x02_dev *dev, u8 tx_mode, + s8 target_power, s8 target_pa_power, + s16 ltssi) +{ + struct ieee80211_channel *chan = dev->mt76.chandef.chan; + int tssi_target = target_power << 12, tssi_slope; + int tssi_offset, tssi_db, ret; + u32 data; + u16 val; + + if (chan->band == NL80211_BAND_5GHZ) { + u8 bound[7]; + int i, err; + + err = mt76x02_eeprom_copy(dev, MT_EE_TSSI_BOUND1, bound, + sizeof(bound)); + if (err < 0) + return err; + + for (i = 0; i < ARRAY_SIZE(bound); i++) { + if (chan->hw_value <= bound[i] || !bound[i]) + break; + } + val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_5G + i * 2); + + tssi_offset = val >> 8; + if ((tssi_offset >= 64 && tssi_offset <= 127) || + (tssi_offset & BIT(7))) + tssi_offset -= BIT(8); + } else { + val = mt76x02_eeprom_get(dev, MT_EE_TSSI_SLOPE_2G); + + tssi_offset = val >> 8; + if (tssi_offset & BIT(7)) + tssi_offset -= BIT(8); + } + tssi_slope = val & 0xff; + + switch (target_pa_power) { + case 1: + if (chan->band == NL80211_BAND_2GHZ) + tssi_target += 29491; /* 3.6 * 8192 */ + /* fall through */ + case 0: + break; + default: + tssi_target += 4424; /* 0.54 * 8192 */ + break; + } + + if (!tx_mode) { + data = mt76_rr(dev, MT_BBP(CORE, 1)); + if (is_mt7630(dev) && mt76_is_mmio(dev)) { + int offset; + + /* 2.3 * 8192 or 1.5 * 8192 */ + offset = (data & BIT(5)) ? 
18841 : 12288; + tssi_target += offset; + } else if (data & BIT(5)) { + /* 0.8 * 8192 */ + tssi_target += 6554; + } + } + + data = mt76_rr(dev, MT_BBP(TXBE, 4)); + switch (data & 0x3) { + case 1: + tssi_target -= 49152; /* -6db * 8192 */ + break; + case 2: + tssi_target -= 98304; /* -12db * 8192 */ + break; + case 3: + tssi_target += 49152; /* 6db * 8192 */ + break; + default: + break; + } + + tssi_db = mt76x0_phy_lin2db(ltssi - dev->cal.tssi_dc) * tssi_slope; + if (chan->band == NL80211_BAND_5GHZ) { + tssi_db += ((tssi_offset - 50) << 10); /* offset s4.3 */ + tssi_target -= tssi_db; + if (ltssi > 254 && tssi_target > 0) { + /* upper saturate */ + tssi_target = 0; + } + } else { + tssi_db += (tssi_offset << 9); /* offset s3.4 */ + tssi_target -= tssi_db; + /* upper-lower saturate */ + if ((ltssi > 126 && tssi_target > 0) || + ((ltssi - dev->cal.tssi_dc) < 1 && tssi_target < 0)) { + tssi_target = 0; + } + } + + if ((dev->cal.tssi_target ^ tssi_target) < 0 && + dev->cal.tssi_target > -4096 && dev->cal.tssi_target < 4096 && + tssi_target > -4096 && tssi_target < 4096) { + if ((tssi_target < 0 && + tssi_target + dev->cal.tssi_target > 0) || + (tssi_target > 0 && + tssi_target + dev->cal.tssi_target <= 0)) + tssi_target = 0; + else + dev->cal.tssi_target = tssi_target; + } else { + dev->cal.tssi_target = tssi_target; + } + + /* round the compensation value to the nearest compensation code */ + if (tssi_target > 0) + tssi_target += 2048; + else + tssi_target -= 2048; + tssi_target >>= 12; + + ret = mt76_get_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP); + if (ret & BIT(5)) + ret -= BIT(6); + ret += tssi_target; + + ret = min_t(int, 31, ret); + return max_t(int, -32, ret); +} + +static void mt76x0_phy_tssi_calibrate(struct mt76x02_dev *dev) +{ + s8 target_power, target_pa_power; + u8 tssi_info[3], tx_mode; + s16 ltssi; + s8 val; + + if (mt76x0_phy_tssi_adc_calibrate(dev, &ltssi, tssi_info) < 0) + return; + + tx_mode = tssi_info[0] & 0x7; + if (mt76x0_phy_get_target_power(dev, tx_mode, tssi_info, + &target_power, &target_pa_power) < 0) + return; + + val = mt76x0_phy_get_delta_power(dev, tx_mode, target_power, + target_pa_power, ltssi); + mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, val); } void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) @@ -571,8 +847,8 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) struct mt76_rate_power *t = &dev->mt76.rate_power; u8 info[2]; - mt76x0_get_power_info(dev, info); mt76x0_get_tx_power_per_rate(dev); + mt76x0_get_power_info(dev, info); mt76x02_add_rate_power_offset(t, info[0]); mt76x02_limit_rate_power(t, dev->mt76.txpower_conf); @@ -585,14 +861,25 @@ void mt76x0_phy_set_txpower(struct mt76x02_dev *dev) void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; + int is_5ghz = (chan->band == NL80211_BAND_5GHZ) ? 
1 : 0; u32 val, tx_alc, reg_val; + if (is_mt7630(dev)) + return; + if (power_on) { - mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value, - false); + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0); + mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, chan->hw_value); usleep_range(10, 20); - /* XXX: tssi */ + + if (mt76x0_tssi_enabled(dev)) { + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_RX); + mt76x0_phy_tssi_dc_calibrate(dev); + mt76_wr(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | + MT_MAC_SYS_CTRL_ENABLE_RX); + } } tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0); @@ -602,7 +889,7 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); - if (chan->band == NL80211_BAND_5GHZ) { + if (is_5ghz) { if (chan->hw_value < 100) val = 0x701; else if (chan->hw_value < 140) @@ -613,14 +900,14 @@ void mt76x0_phy_calibrate(struct mt76x02_dev *dev, bool power_on) val = 0x600; } - mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_FULL, val); msleep(350); - mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 1, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz); usleep_range(15000, 20000); mt76_wr(dev, MT_BBP(IBI, 9), reg_val); mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1); } EXPORT_SYMBOL_GPL(mt76x0_phy_calibrate); @@ -684,7 +971,7 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, } if (mt76_is_usb(dev)) { - mt76x0_bbp_set_bw(dev, chandef->width); + mt76x0_phy_bbp_set_bw(dev, chandef->width); } else { if (chandef->width == NL80211_CHAN_WIDTH_80 || chandef->width == NL80211_CHAN_WIDTH_40) @@ -696,7 +983,6 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, mt76x02_phy_set_bw(dev, chandef->width, ch_group_index); mt76x02_phy_set_band(dev, chandef->chan->band, ch_group_index & 1); - mt76x0_ant_select(dev); mt76_rmw(dev, MT_EXT_CCA_CFG, (MT_EXT_CCA_CFG_CCA0 | @@ -710,29 +996,21 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, mt76x0_phy_set_chan_rf_params(dev, channel, rf_bw_band); /* set Japan Tx filter at channel 14 */ - val = mt76_rr(dev, MT_BBP(CORE, 1)); if (channel == 14) - val |= 0x20; + mt76_set(dev, MT_BBP(CORE, 1), 0x20); else - val &= ~0x20; - mt76_wr(dev, MT_BBP(CORE, 1), val); + mt76_clear(dev, MT_BBP(CORE, 1), 0x20); mt76x0_read_rx_gain(dev); mt76x0_phy_set_chan_bbp_params(dev, rf_bw_band); - mt76x02_init_agc_gain(dev); - - if (mt76_is_usb(dev)) { - mt76x0_vco_cal(dev, channel); - } else { - /* enable vco */ - rf_set(dev, MT_RF(0, 4), BIT(7)); - } + /* enable vco */ + mt76x0_rf_set(dev, MT_RF(0, 4), BIT(7)); if (scan) return 0; - if (mt76_is_mmio(dev)) - mt76x0_phy_calibrate(dev, false); + mt76x02_init_agc_gain(dev); + mt76x0_phy_calibrate(dev, false); mt76x0_phy_set_txpower(dev); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, @@ -741,55 +1019,21 @@ int mt76x0_phy_set_channel(struct mt76x02_dev *dev, return 0; } -void mt76x0_phy_recalibrate_after_assoc(struct mt76x02_dev *dev) -{ - u32 tx_alc, reg_val; - u8 channel = dev->mt76.chandef.chan->hw_value; - int is_5ghz = (dev->mt76.chandef.chan->band == NL80211_BAND_5GHZ) ? 
1 : 0; - - mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); - - mt76x0_vco_cal(dev, channel); - - tx_alc = mt76_rr(dev, MT_TX_ALC_CFG_0); - mt76_wr(dev, MT_TX_ALC_CFG_0, 0); - usleep_range(500, 700); - - reg_val = mt76_rr(dev, MT_BBP(IBI, 9)); - mt76_wr(dev, MT_BBP(IBI, 9), 0xffffff7e); - - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 0, false); - - mt76x02_mcu_calibrate(dev, MCU_CAL_LC, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_LOFT, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_TX_GROUP_DELAY, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQ, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_RX_GROUP_DELAY, is_5ghz, false); - - mt76_wr(dev, MT_BBP(IBI, 9), reg_val); - mt76_wr(dev, MT_TX_ALC_CFG_0, tx_alc); - msleep(100); - - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, 1, false); -} - -static void mt76x0_temp_sensor(struct mt76x02_dev *dev) +static void mt76x0_phy_temp_sensor(struct mt76x02_dev *dev) { u8 rf_b7_73, rf_b0_66, rf_b0_67; s8 val; - rf_b7_73 = rf_rr(dev, MT_RF(7, 73)); - rf_b0_66 = rf_rr(dev, MT_RF(0, 66)); - rf_b0_67 = rf_rr(dev, MT_RF(0, 67)); + rf_b7_73 = mt76x0_rf_rr(dev, MT_RF(7, 73)); + rf_b0_66 = mt76x0_rf_rr(dev, MT_RF(0, 66)); + rf_b0_67 = mt76x0_rf_rr(dev, MT_RF(0, 67)); - rf_wr(dev, MT_RF(7, 73), 0x02); - rf_wr(dev, MT_RF(0, 66), 0x23); - rf_wr(dev, MT_RF(0, 67), 0x01); + mt76x0_rf_wr(dev, MT_RF(7, 73), 0x02); + mt76x0_rf_wr(dev, MT_RF(0, 66), 0x23); + mt76x0_rf_wr(dev, MT_RF(0, 67), 0x01); mt76_wr(dev, MT_BBP(CORE, 34), 0x00080055); - - if (!mt76_poll(dev, MT_BBP(CORE, 34), BIT(4), 0, 2000)) { + if (!mt76_poll_msec(dev, MT_BBP(CORE, 34), BIT(4), 0, 200)) { mt76_clear(dev, MT_BBP(CORE, 34), BIT(4)); goto done; } @@ -799,8 +1043,7 @@ static void mt76x0_temp_sensor(struct mt76x02_dev *dev) if (abs(val - dev->cal.temp_vco) > 20) { mt76x02_mcu_calibrate(dev, MCU_CAL_VCO, - dev->mt76.chandef.chan->hw_value, - false); + dev->mt76.chandef.chan->hw_value); dev->cal.temp_vco = val; } if (abs(val - dev->cal.temp) > 30) { @@ -809,18 +1052,20 @@ static void mt76x0_temp_sensor(struct mt76x02_dev *dev) } done: - rf_wr(dev, MT_RF(7, 73), rf_b7_73); - rf_wr(dev, MT_RF(0, 66), rf_b0_66); - rf_wr(dev, MT_RF(0, 67), rf_b0_67); + mt76x0_rf_wr(dev, MT_RF(7, 73), rf_b7_73); + mt76x0_rf_wr(dev, MT_RF(0, 66), rf_b0_66); + mt76x0_rf_wr(dev, MT_RF(0, 67), rf_b0_67); } static void mt76x0_phy_set_gain_val(struct mt76x02_dev *dev) { u8 gain = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; - u32 val = 0x122c << 16 | 0xf2; - mt76_wr(dev, MT_BBP(AGC, 8), - val | FIELD_PREP(MT_BBP_AGC_GAIN, gain)); + mt76_rmw_field(dev, MT_BBP(AGC, 8), MT_BBP_AGC_GAIN, gain); + + if ((dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) && + !is_mt7630(dev)) + mt76x02_phy_dfs_adjust_agc(dev); } static void @@ -835,7 +1080,8 @@ mt76x0_phy_update_channel_gain(struct mt76x02_dev *dev) low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); - gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); + gain_change = dev->cal.low_gain < 0 || + (dev->cal.low_gain & 2) ^ (low_gain & 2); dev->cal.low_gain = low_gain; if (!gain_change) { @@ -860,20 +1106,65 @@ static void mt76x0_phy_calibration_work(struct work_struct *work) cal_work.work); mt76x0_phy_update_channel_gain(dev); - if (!mt76x0_tssi_enabled(dev)) - mt76x0_temp_sensor(dev); + if (mt76x0_tssi_enabled(dev)) + mt76x0_phy_tssi_calibrate(dev); + else + mt76x0_phy_temp_sensor(dev); 
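The TSSI compensation path above leans on mt76x0_phy_lin2db(), a fixed-point helper: it normalizes the input into [2^15, 2^16), approximates the fractional log2 with a two-segment piecewise-linear fit, then scales by roughly 6.02 (20*log10(2)). Reading the constants, the return value looks like 20*log10(val) in 1/32 dB units; that scaling is my inference, not something the driver documents. A minimal userspace sketch for checking that reading against libm (build with gcc -lm):

#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* userspace transcription of mt76x0_phy_lin2db(), for comparison only */
static int16_t lin2db(uint16_t val)
{
	uint32_t mantissa = val << 4;
	int exp = -4, data, ret;

	/* normalize mantissa into [2^15, 2^16) */
	while (mantissa < (1u << 15)) {
		mantissa <<= 1;
		if (--exp < -20)
			return -10000;
	}
	while (mantissa > 0xffff) {
		mantissa >>= 1;
		if (++exp > 20)
			return -10000;
	}

	/* two-segment piecewise-linear fit of the fractional log2 */
	if (mantissa <= 47104)
		data = mantissa + (mantissa >> 3) + (mantissa >> 4) - 38400;
	else
		data = mantissa - (mantissa >> 3) - (mantissa >> 6) - 23040;
	if (data < 0)
		data = 0;

	ret = ((15 + exp) << 15) + data;	/* log2(val) in 2^-15 steps */
	ret = (ret << 2) + (ret << 1) + (ret >> 6) + (ret >> 7); /* ~x6.023 */
	return ret >> 10;
}

int main(void)
{
	for (unsigned v = 1; v <= 1024; v <<= 1)
		printf("%4u: fixed %.2f dB, libm %.2f dB\n",
		       v, lin2db(v) / 32.0, 20.0 * log10(v));
	return 0;
}

For these power-of-two inputs the fixed-point result stays within about 0.15 dB of the libm value, which is ample for steering a 6-bit compensation code.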
ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, - MT_CALIBRATE_INTERVAL); + 4 * MT_CALIBRATE_INTERVAL); } -static void mt76x0_rf_init(struct mt76x02_dev *dev) +static void mt76x0_rf_patch_reg_array(struct mt76x02_dev *dev, + const struct mt76_reg_pair *rp, int len) +{ + int i; + + for (i = 0; i < len; i++) { + u32 reg = rp[i].reg; + u8 val = rp[i].value; + + switch (reg) { + case MT_RF(0, 3): + if (mt76_is_mmio(dev)) { + if (is_mt7630(dev)) + val = 0x70; + else + val = 0x63; + } else { + val = 0x73; + } + break; + case MT_RF(0, 21): + if (is_mt7610e(dev)) + val = 0x10; + else + val = 0x12; + break; + case MT_RF(5, 2): + if (is_mt7630(dev)) + val = 0x1d; + else if (is_mt7610e(dev)) + val = 0x00; + else + val = 0x0c; + break; + default: + break; + } + mt76x0_rf_wr(dev, reg, val); + } +} + +static void mt76x0_phy_rf_init(struct mt76x02_dev *dev) { int i; u8 val; - RF_RANDOM_WRITE(dev, mt76x0_rf_central_tab); - RF_RANDOM_WRITE(dev, mt76x0_rf_2g_channel_0_tab); + mt76x0_rf_patch_reg_array(dev, mt76x0_rf_central_tab, + ARRAY_SIZE(mt76x0_rf_central_tab)); + mt76x0_rf_patch_reg_array(dev, mt76x0_rf_2g_channel_0_tab, + ARRAY_SIZE(mt76x0_rf_2g_channel_0_tab)); RF_RANDOM_WRITE(dev, mt76x0_rf_5g_channel_0_tab); RF_RANDOM_WRITE(dev, mt76x0_rf_vga_channel_0_tab); @@ -881,16 +1172,16 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev) const struct mt76x0_rf_switch_item *item = &mt76x0_rf_bw_switch_tab[i]; if (item->bw_band == RF_BW_20) - rf_wr(dev, item->rf_bank_reg, item->value); + mt76x0_rf_wr(dev, item->rf_bank_reg, item->value); else if (((RF_G_BAND | RF_BW_20) & item->bw_band) == (RF_G_BAND | RF_BW_20)) - rf_wr(dev, item->rf_bank_reg, item->value); + mt76x0_rf_wr(dev, item->rf_bank_reg, item->value); } for (i = 0; i < ARRAY_SIZE(mt76x0_rf_band_switch_tab); i++) { if (mt76x0_rf_band_switch_tab[i].bw_band & RF_G_BAND) { - rf_wr(dev, - mt76x0_rf_band_switch_tab[i].rf_bank_reg, - mt76x0_rf_band_switch_tab[i].value); + mt76x0_rf_wr(dev, + mt76x0_rf_band_switch_tab[i].rf_bank_reg, + mt76x0_rf_band_switch_tab[i].value); } } @@ -899,32 +1190,29 @@ static void mt76x0_rf_init(struct mt76x02_dev *dev) E1: B0.R22<6:0>: xo_cxo<6:0> E2: B0.R21<0>: xo_cxo<0>, B0.R22<7:0>: xo_cxo<8:1> */ - rf_wr(dev, MT_RF(0, 22), - min_t(u8, dev->cal.rx.freq_offset, 0xbf)); - val = rf_rr(dev, MT_RF(0, 22)); - - /* - Reset the DAC (Set B0.R73<7>=1, then set B0.R73<7>=0, and then set B0.R73<7>) during power up. + mt76x0_rf_wr(dev, MT_RF(0, 22), + min_t(u8, dev->cal.rx.freq_offset, 0xbf)); + val = mt76x0_rf_rr(dev, MT_RF(0, 22)); + + /* Reset procedure DAC during power-up: + * - set B0.R73<7> + * - clear B0.R73<7> + * - set B0.R73<7> */ - val = rf_rr(dev, MT_RF(0, 73)); - val |= 0x80; - rf_wr(dev, MT_RF(0, 73), val); - val &= ~0x80; - rf_wr(dev, MT_RF(0, 73), val); - val |= 0x80; - rf_wr(dev, MT_RF(0, 73), val); + mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7)); + mt76x0_rf_clear(dev, MT_RF(0, 73), BIT(7)); + mt76x0_rf_set(dev, MT_RF(0, 73), BIT(7)); - /* - vcocal_en (initiate VCO calibration (reset after completion)) - It should be at the end of RF configuration. 
- */ - rf_set(dev, MT_RF(0, 4), 0x80); + /* vcocal_en: initiate VCO calibration (reset after completion) */ + mt76x0_rf_set(dev, MT_RF(0, 4), 0x80); } void mt76x0_phy_init(struct mt76x02_dev *dev) { INIT_DELAYED_WORK(&dev->cal_work, mt76x0_phy_calibration_work); - mt76x0_rf_init(dev); + mt76x0_phy_ant_select(dev); + mt76x0_phy_rf_init(dev); mt76x02_phy_set_rxpath(dev); mt76x02_phy_set_txdac(dev); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h index 2880a43c3cb0..9889132b768a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/phy.h @@ -30,6 +30,23 @@ #define MT_RF_BANK(offset) (offset >> 16) #define MT_RF_REG(offset) (offset & 0xff) +#define MT_RF_VCO_BP_CLOSE_LOOP BIT(3) +#define MT_RF_VCO_BP_CLOSE_LOOP_MASK GENMASK(3, 0) +#define MT_RF_VCO_CAL_MASK GENMASK(2, 0) +#define MT_RF_START_TIME 0x3 +#define MT_RF_START_TIME_MASK GENMASK(2, 0) +#define MT_RF_SETTLE_TIME_MASK GENMASK(6, 4) + +#define MT_RF_PLL_DEN_MASK GENMASK(4, 0) +#define MT_RF_PLL_K_MASK GENMASK(4, 0) +#define MT_RF_SDM_RESET_MASK BIT(7) +#define MT_RF_SDM_MASH_PRBS_MASK GENMASK(6, 2) +#define MT_RF_SDM_BP_MASK BIT(1) +#define MT_RF_ISI_ISO_MASK GENMASK(7, 6) +#define MT_RF_PFD_DLY_MASK GENMASK(5, 4) +#define MT_RF_CLK_SEL_MASK GENMASK(3, 2) +#define MT_RF_XO_DIV_MASK GENMASK(1, 0) + struct mt76x0_bbp_switch_item { u16 bw_band; struct mt76_reg_pair reg_pair; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c deleted file mode 100644 index 8abdd3cd546d..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.c +++ /dev/null @@ -1,21 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/module.h> - -#ifndef __CHECKER__ -#define CREATE_TRACE_POINTS -#include "trace.h" - -#endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h b/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h deleted file mode 100644 index 75d1d6738c34..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/trace.h +++ /dev/null @@ -1,312 +0,0 @@ -/* - * Copyright (C) 2014 Felix Fietkau <nbd@openwrt.org> - * Copyright (C) 2015 Jakub Kicinski <kubakici@wp.pl> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- */ - -#if !defined(__MT76X0U_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -#define __MT76X0U_TRACE_H - -#include <linux/tracepoint.h> -#include "mt76x0.h" - -#undef TRACE_SYSTEM -#define TRACE_SYSTEM mt76x0 - -#define MAXNAME 32 -#define DEV_ENTRY __array(char, wiphy_name, 32) -#define DEV_ASSIGN strlcpy(__entry->wiphy_name, \ - wiphy_name(dev->hw->wiphy), MAXNAME) -#define DEV_PR_FMT "%s " -#define DEV_PR_ARG __entry->wiphy_name - -#define REG_ENTRY __field(u32, reg) __field(u32, val) -#define REG_ASSIGN __entry->reg = reg; __entry->val = val -#define REG_PR_FMT "%04x=%08x" -#define REG_PR_ARG __entry->reg, __entry->val - -DECLARE_EVENT_CLASS(dev_reg_evt, - TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), - TP_ARGS(dev, reg, val), - TP_STRUCT__entry( - DEV_ENTRY - REG_ENTRY - ), - TP_fast_assign( - DEV_ASSIGN; - REG_ASSIGN; - ), - TP_printk( - DEV_PR_FMT REG_PR_FMT, - DEV_PR_ARG, REG_PR_ARG - ) -); - -DEFINE_EVENT(dev_reg_evt, mt76x0_reg_read, - TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), - TP_ARGS(dev, reg, val) -); - -DEFINE_EVENT(dev_reg_evt, mt76x0_reg_write, - TP_PROTO(struct mt76_dev *dev, u32 reg, u32 val), - TP_ARGS(dev, reg, val) -); - -TRACE_EVENT(mt76x0_submit_urb, - TP_PROTO(struct mt76_dev *dev, struct urb *u), - TP_ARGS(dev, u), - TP_STRUCT__entry( - DEV_ENTRY __field(unsigned, pipe) __field(u32, len) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->pipe = u->pipe; - __entry->len = u->transfer_buffer_length; - ), - TP_printk(DEV_PR_FMT "p:%08x len:%u", - DEV_PR_ARG, __entry->pipe, __entry->len) -); - -#define trace_mt76x0_submit_urb_sync(__dev, __pipe, __len) ({ \ - struct urb u; \ - u.pipe = __pipe; \ - u.transfer_buffer_length = __len; \ - trace_mt76x0_submit_urb(__dev, &u); \ -}) - -TRACE_EVENT(mt76x0_mcu_msg_send, - TP_PROTO(struct mt76_dev *dev, - struct sk_buff *skb, u32 csum, bool resp), - TP_ARGS(dev, skb, csum, resp), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, info) - __field(u32, csum) - __field(bool, resp) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->info = *(u32 *)skb->data; - __entry->csum = csum; - __entry->resp = resp; - ), - TP_printk(DEV_PR_FMT "i:%08x c:%08x r:%d", - DEV_PR_ARG, __entry->info, __entry->csum, __entry->resp) -); - -TRACE_EVENT(mt76x0_vend_req, - TP_PROTO(struct mt76_dev *dev, unsigned pipe, u8 req, u8 req_type, - u16 val, u16 offset, void *buf, size_t buflen, int ret), - TP_ARGS(dev, pipe, req, req_type, val, offset, buf, buflen, ret), - TP_STRUCT__entry( - DEV_ENTRY - __field(unsigned, pipe) __field(u8, req) __field(u8, req_type) - __field(u16, val) __field(u16, offset) __field(void*, buf) - __field(int, buflen) __field(int, ret) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->pipe = pipe; - __entry->req = req; - __entry->req_type = req_type; - __entry->val = val; - __entry->offset = offset; - __entry->buf = buf; - __entry->buflen = buflen; - __entry->ret = ret; - ), - TP_printk(DEV_PR_FMT - "%d p:%08x req:%02hhx %02hhx val:%04hx %04hx buf:%d %d", - DEV_PR_ARG, __entry->ret, __entry->pipe, __entry->req, - __entry->req_type, __entry->val, __entry->offset, - !!__entry->buf, __entry->buflen) -); - -DECLARE_EVENT_CLASS(dev_rf_reg_evt, - TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), - TP_ARGS(dev, bank, reg, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u8, bank) - __field(u8, reg) - __field(u8, val) - ), - TP_fast_assign( - DEV_ASSIGN; - REG_ASSIGN; - __entry->bank = bank; - ), - TP_printk( - DEV_PR_FMT "%02hhx:%02hhx=%02hhx", - DEV_PR_ARG, __entry->bank, __entry->reg, __entry->val - ) -); - 
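All of the events being deleted here follow the kernel's stock tracepoint boilerplate: DECLARE_EVENT_CLASS fixes the record layout and print format once, and each DEFINE_EVENT stamps out a named trace_<name>() hook that reuses the class. A stripped-down header following the same pattern, with a hypothetical mydrv subsystem standing in for mt76x0 (the names are invented; TRACE_INCLUDE_FILE must match the header's file name):

#if !defined(__MYDRV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
#define __MYDRV_TRACE_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM mydrv

/* one class defines layout and format, shared by any number of events */
DECLARE_EVENT_CLASS(mydrv_reg_evt,
	TP_PROTO(u32 reg, u32 val),
	TP_ARGS(reg, val),
	TP_STRUCT__entry(
		__field(u32, reg)
		__field(u32, val)
	),
	TP_fast_assign(
		__entry->reg = reg;
		__entry->val = val;
	),
	TP_printk("%04x=%08x", __entry->reg, __entry->val)
);

/* each DEFINE_EVENT emits a trace_mydrv_reg_read()/_write() hook */
DEFINE_EVENT(mydrv_reg_evt, mydrv_reg_read,
	TP_PROTO(u32 reg, u32 val),
	TP_ARGS(reg, val)
);

DEFINE_EVENT(mydrv_reg_evt, mydrv_reg_write,
	TP_PROTO(u32 reg, u32 val),
	TP_ARGS(reg, val)
);

#endif

/* this part must stay outside the multi-read guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE mydrv_trace

#include <trace/define_trace.h>

Exactly one .c file then defines CREATE_TRACE_POINTS before including the header, which is all the deleted trace.c above did; with the mt76x0-specific events gone, the driver relies on the shared mt76x02 trace infrastructure instead.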
-DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_read, - TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), - TP_ARGS(dev, bank, reg, val) -); - -DEFINE_EVENT(dev_rf_reg_evt, mt76x0_rf_write, - TP_PROTO(struct mt76_dev *dev, u8 bank, u8 reg, u8 val), - TP_ARGS(dev, bank, reg, val) -); - -DECLARE_EVENT_CLASS(dev_simple_evt, - TP_PROTO(struct mt76_dev *dev, u8 val), - TP_ARGS(dev, val), - TP_STRUCT__entry( - DEV_ENTRY - __field(u8, val) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->val = val; - ), - TP_printk( - DEV_PR_FMT "%02hhx", DEV_PR_ARG, __entry->val - ) -); - -TRACE_EVENT(mt76x0_rx, - TP_PROTO(struct mt76_dev *dev, struct mt76x02_rxwi *rxwi, u32 f), - TP_ARGS(dev, rxwi, f), - TP_STRUCT__entry( - DEV_ENTRY - __field_struct(struct mt76x02_rxwi, rxwi) - __field(u32, fce_info) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->rxwi = *rxwi; - __entry->fce_info = f; - ), - TP_printk(DEV_PR_FMT "rxi:%08x ctl:%08x", DEV_PR_ARG, - le32_to_cpu(__entry->rxwi.rxinfo), - le32_to_cpu(__entry->rxwi.ctl)) -); - -TRACE_EVENT(mt76x0_tx, - TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb, - struct mt76x02_sta *sta, struct mt76x02_txwi *h), - TP_ARGS(dev, skb, sta, h), - TP_STRUCT__entry( - DEV_ENTRY - __field_struct(struct mt76x02_txwi, h) - __field(struct sk_buff *, skb) - __field(struct mt76x02_sta *, sta) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->h = *h; - __entry->skb = skb; - __entry->sta = sta; - ), - TP_printk(DEV_PR_FMT "skb:%p sta:%p flg:%04hx rate:%04hx " - "ack:%02hhx wcid:%02hhx len_ctl:%05hx", DEV_PR_ARG, - __entry->skb, __entry->sta, - le16_to_cpu(__entry->h.flags), - le16_to_cpu(__entry->h.rate), - __entry->h.ack_ctl, __entry->h.wcid, - le16_to_cpu(__entry->h.len_ctl)) -); - -TRACE_EVENT(mt76x0_tx_dma_done, - TP_PROTO(struct mt76_dev *dev, struct sk_buff *skb), - TP_ARGS(dev, skb), - TP_STRUCT__entry( - DEV_ENTRY - __field(struct sk_buff *, skb) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->skb = skb; - ), - TP_printk(DEV_PR_FMT "%p", DEV_PR_ARG, __entry->skb) -); - -TRACE_EVENT(mt76x0_tx_status_cleaned, - TP_PROTO(struct mt76_dev *dev, int cleaned), - TP_ARGS(dev, cleaned), - TP_STRUCT__entry( - DEV_ENTRY - __field(int, cleaned) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->cleaned = cleaned; - ), - TP_printk(DEV_PR_FMT "%d", DEV_PR_ARG, __entry->cleaned) -); - -TRACE_EVENT(mt76x0_tx_status, - TP_PROTO(struct mt76_dev *dev, u32 stat1, u32 stat2), - TP_ARGS(dev, stat1, stat2), - TP_STRUCT__entry( - DEV_ENTRY - __field(u32, stat1) __field(u32, stat2) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->stat1 = stat1; - __entry->stat2 = stat2; - ), - TP_printk(DEV_PR_FMT "%08x %08x", - DEV_PR_ARG, __entry->stat1, __entry->stat2) -); - -TRACE_EVENT(mt76x0_rx_dma_aggr, - TP_PROTO(struct mt76_dev *dev, int cnt, bool paged), - TP_ARGS(dev, cnt, paged), - TP_STRUCT__entry( - DEV_ENTRY - __field(u8, cnt) - __field(bool, paged) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->cnt = cnt; - __entry->paged = paged; - ), - TP_printk(DEV_PR_FMT "cnt:%d paged:%d", - DEV_PR_ARG, __entry->cnt, __entry->paged) -); - -DEFINE_EVENT(dev_simple_evt, mt76x0_set_key, - TP_PROTO(struct mt76_dev *dev, u8 val), - TP_ARGS(dev, val) -); - -TRACE_EVENT(mt76x0_set_shared_key, - TP_PROTO(struct mt76_dev *dev, u8 vid, u8 key), - TP_ARGS(dev, vid, key), - TP_STRUCT__entry( - DEV_ENTRY - __field(u8, vid) - __field(u8, key) - ), - TP_fast_assign( - DEV_ASSIGN; - __entry->vid = vid; - __entry->key = key; - ), - TP_printk(DEV_PR_FMT "phy:%02hhx off:%02hhx", - DEV_PR_ARG, __entry->vid, __entry->key) -); - -#endif - -#undef 
TRACE_INCLUDE_PATH -#define TRACE_INCLUDE_PATH . -#undef TRACE_INCLUDE_FILE -#define TRACE_INCLUDE_FILE trace - -#include <trace/define_trace.h> diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c index a7fd36c2f633..0e6b43bb4678 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb.c @@ -17,7 +17,6 @@ #include "mt76x0.h" #include "mcu.h" -#include "trace.h" #include "../mt76x02_usb.h" static struct usb_device_id mt76x0_device_table[] = { @@ -117,6 +116,7 @@ static int mt76x0u_start(struct ieee80211_hw *hw) if (ret) goto out; + mt76x0_phy_calibrate(dev, true); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->mac_work, MT_CALIBRATE_INTERVAL); ieee80211_queue_delayed_work(dev->mt76.hw, &dev->cal_work, @@ -145,17 +145,17 @@ static const struct ieee80211_ops mt76x0u_ops = { .remove_interface = mt76x02_remove_interface, .config = mt76x0_config, .configure_filter = mt76x02_configure_filter, - .bss_info_changed = mt76x0_bss_info_changed, - .sta_add = mt76x02_sta_add, - .sta_remove = mt76x02_sta_remove, + .bss_info_changed = mt76x02_bss_info_changed, + .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x0_sw_scan, - .sw_scan_complete = mt76x0_sw_scan_complete, + .sw_scan_start = mt76x02_sw_scan, + .sw_scan_complete = mt76x02_sw_scan_complete, .ampdu_action = mt76x02_ampdu_action, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, - .set_rts_threshold = mt76x0_set_rts_threshold, + .set_rts_threshold = mt76x02_set_rts_threshold, .wake_tx_queue = mt76_wake_tx_queue, + .get_txpower = mt76x02_get_txpower, }; static int mt76x0u_register_device(struct mt76x02_dev *dev) @@ -218,6 +218,8 @@ static int mt76x0u_probe(struct usb_interface *usb_intf, .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, .rx_skb = mt76x02_queue_rx_skb, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, }; struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt76x02_dev *dev; @@ -337,6 +339,8 @@ err: } MODULE_DEVICE_TABLE(usb, mt76x0_device_table); +MODULE_FIRMWARE(MT7610E_FIRMWARE); +MODULE_FIRMWARE(MT7610U_FIRMWARE); MODULE_LICENSE("GPL"); static struct usb_driver mt76x0_driver = { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c index a9f14d5149d1..9d7585029df9 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x0/usb_mcu.c @@ -22,7 +22,6 @@ #define MCU_FW_URB_MAX_PAYLOAD 0x38f8 #define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) -#define MT7610U_FIRMWARE "mediatek/mt7610u.bin" static int mt76x0u_upload_firmware(struct mt76x02_dev *dev, @@ -75,6 +74,24 @@ out: return err; } +static int mt76x0_get_firmware(struct mt76x02_dev *dev, + const struct firmware **fw) +{ + int err; + + /* try to load mt7610e fw if available + * otherwise fall back to mt7610u one + */ + err = firmware_request_nowarn(fw, MT7610E_FIRMWARE, dev->mt76.dev); + if (err) { + dev_info(dev->mt76.dev, "%s not found, switching to %s", + MT7610E_FIRMWARE, MT7610U_FIRMWARE); + return request_firmware(fw, MT7610U_FIRMWARE, + dev->mt76.dev); + } + return 0; +} + static int mt76x0u_load_firmware(struct mt76x02_dev *dev) { const struct firmware *fw; @@ -88,7 +105,7 @@ static int mt76x0u_load_firmware(struct mt76x02_dev *dev) if (mt76x0_firmware_running(dev)) return 0; - ret = request_firmware(&fw, 
MT7610U_FIRMWARE, dev->mt76.dev); + ret = mt76x0_get_firmware(dev, &fw); if (ret) return ret; @@ -171,5 +188,3 @@ int mt76x0u_mcu_init(struct mt76x02_dev *dev) return 0; } - -MODULE_FIRMWARE(MT7610U_FIRMWARE); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02.h b/drivers/net/wireless/mediatek/mt76/mt76x02.h index 7806963b1905..6782665049dd 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02.h @@ -26,13 +26,7 @@ #include "mt76x02_dfs.h" #include "mt76x02_dma.h" -struct mt76x02_mac_stats { - u64 rx_stat[6]; - u64 tx_stat[6]; - u64 aggr_stat[2]; - u64 aggr_n[32]; - u64 zero_len_del[2]; -}; +#define MT_CALIBRATE_INTERVAL HZ #define MT_MAX_CHAINS 2 struct mt76x02_rx_freq_cal { @@ -63,6 +57,10 @@ struct mt76x02_calibration { bool tssi_comp_pending; bool dpd_cal_done; bool channel_cal_done; + bool gain_init_done; + + int tssi_target; + s8 tssi_dc; }; struct mt76x02_dev { @@ -82,8 +80,6 @@ struct mt76x02_dev { struct delayed_work cal_work; struct delayed_work mac_work; - struct mt76x02_mac_stats stats; - atomic_t avg_ampdu_len; u32 aggr_stats[32]; struct sk_buff *beacons[8]; @@ -109,14 +105,16 @@ struct mt76x02_dev { extern struct ieee80211_rate mt76x02_rates[12]; +void mt76x02_init_device(struct mt76x02_dev *dev); void mt76x02_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast); -int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); -int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta); +int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta); +void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev); void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, unsigned int idx); int mt76x02_add_interface(struct ieee80211_hw *hw, @@ -139,9 +137,12 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj); void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr); +void mt76x02_set_tx_ackto(struct mt76x02_dev *dev); +void mt76x02_set_coverage_class(struct ieee80211_hw *hw, + s16 coverage_class); +int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val); int mt76x02_insert_hdr_pad(struct sk_buff *skb); void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len); -void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb); bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update); void mt76x02_queue_rx_skb(struct mt76_dev *mdev, enum mt76_rxq_id q, struct sk_buff *skb); @@ -153,12 +154,24 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, struct sk_buff *skb, struct mt76_queue *q, struct mt76_wcid *wcid, struct ieee80211_sta *sta, u32 *tx_info); +void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac); +void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif); +int mt76x02_get_txpower(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int *dbm); +void mt76x02_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); +void mt76x02_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf *info, u32 changed); extern const u16 mt76x02_beacon_offsets[16]; -void mt76x02_set_beacon_offsets(struct 
mt76x02_dev *dev); +void mt76x02_init_beacon_config(struct mt76x02_dev *dev); void mt76x02_set_irq_mask(struct mt76x02_dev *dev, u32 clear, u32 set); void mt76x02_mac_start(struct mt76x02_dev *dev); +void mt76x02_init_debugfs(struct mt76x02_dev *dev); + static inline bool is_mt76x2(struct mt76x02_dev *dev) { return mt76_chip(&dev->mt76) == 0x7612 || diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c index e8f8ccc0a5ed..a9d52ba1e270 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/debugfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_debugfs.c @@ -15,10 +15,10 @@ */ #include <linux/debugfs.h> -#include "mt76x2.h" +#include "mt76x02.h" static int -mt76x2_ampdu_stat_read(struct seq_file *file, void *data) +mt76x02_ampdu_stat_read(struct seq_file *file, void *data) { struct mt76x02_dev *dev = file->private; int i, j; @@ -42,9 +42,9 @@ mt76x2_ampdu_stat_read(struct seq_file *file, void *data) } static int -mt76x2_ampdu_stat_open(struct inode *inode, struct file *f) +mt76x02_ampdu_stat_open(struct inode *inode, struct file *f) { - return single_open(f, mt76x2_ampdu_stat_read, inode->i_private); + return single_open(f, mt76x02_ampdu_stat_read, inode->i_private); } static int read_txpower(struct seq_file *file, void *data) @@ -59,14 +59,14 @@ static int read_txpower(struct seq_file *file, void *data) } static const struct file_operations fops_ampdu_stat = { - .open = mt76x2_ampdu_stat_open, + .open = mt76x02_ampdu_stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int -mt76x2_dfs_stat_read(struct seq_file *file, void *data) +mt76x02_dfs_stat_read(struct seq_file *file, void *data) { struct mt76x02_dev *dev = file->private; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; @@ -92,13 +92,13 @@ mt76x2_dfs_stat_read(struct seq_file *file, void *data) } static int -mt76x2_dfs_stat_open(struct inode *inode, struct file *f) +mt76x02_dfs_stat_open(struct inode *inode, struct file *f) { - return single_open(f, mt76x2_dfs_stat_read, inode->i_private); + return single_open(f, mt76x02_dfs_stat_read, inode->i_private); } static const struct file_operations fops_dfs_stat = { - .open = mt76x2_dfs_stat_open, + .open = mt76x02_dfs_stat_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, @@ -116,7 +116,7 @@ static int read_agc(struct seq_file *file, void *data) return 0; } -void mt76x2_init_debugfs(struct mt76x02_dev *dev) +void mt76x02_init_debugfs(struct mt76x02_dev *dev) { struct dentry *dir; @@ -134,4 +134,4 @@ void mt76x2_init_debugfs(struct mt76x02_dev *dev) debugfs_create_devm_seqfile(dev->mt76.dev, "agc", dir, read_agc); } -EXPORT_SYMBOL_GPL(mt76x2_init_debugfs); +EXPORT_SYMBOL_GPL(mt76x02_init_debugfs); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c index b56febae8945..054609c634a2 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_dfs.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.c @@ -14,7 +14,7 @@ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
*/ -#include "mt76x2.h" +#include "mt76x02.h" #define RADAR_SPEC(m, len, el, eh, wl, wh, \ w_tolerance, tl, th, t_tolerance, \ @@ -151,8 +151,7 @@ static const struct mt76x02_radar_specs jp_w53_radar_specs[] = { }; static void -mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, - u8 enable) +mt76x02_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, u8 enable) { u32 data; @@ -160,8 +159,8 @@ mt76x2_dfs_set_capture_mode_ctrl(struct mt76x02_dev *dev, mt76_wr(dev, MT_BBP(DFS, 36), data); } -static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev, - struct mt76x02_dfs_sequence *seq) +static void mt76x02_dfs_seq_pool_put(struct mt76x02_dev *dev, + struct mt76x02_dfs_sequence *seq) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; @@ -172,7 +171,7 @@ static void mt76x2_dfs_seq_pool_put(struct mt76x02_dev *dev, } static struct mt76x02_dfs_sequence * -mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev) +mt76x02_dfs_seq_pool_get(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_sequence *seq; @@ -192,7 +191,7 @@ mt76x2_dfs_seq_pool_get(struct mt76x02_dev *dev) return seq; } -static int mt76x2_dfs_get_multiple(int val, int frac, int margin) +static int mt76x02_dfs_get_multiple(int val, int frac, int margin) { int remainder, factor; @@ -214,7 +213,7 @@ static int mt76x2_dfs_get_multiple(int val, int frac, int margin) return factor; } -static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev) +static void mt76x02_dfs_detector_reset(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_sequence *seq, *tmp_seq; @@ -231,11 +230,11 @@ static void mt76x2_dfs_detector_reset(struct mt76x02_dev *dev) list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { list_del_init(&seq->head); - mt76x2_dfs_seq_pool_put(dev, seq); + mt76x02_dfs_seq_pool_put(dev, seq); } } -static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev) +static bool mt76x02_dfs_check_chirp(struct mt76x02_dev *dev) { bool ret = false; u32 current_ts, delta_ts; @@ -256,8 +255,8 @@ static bool mt76x2_dfs_check_chirp(struct mt76x02_dev *dev) return ret; } -static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev, - struct mt76x02_dfs_hw_pulse *pulse) +static void mt76x02_dfs_get_hw_pulse(struct mt76x02_dev *dev, + struct mt76x02_dfs_hw_pulse *pulse) { u32 data; @@ -276,8 +275,8 @@ static void mt76x2_dfs_get_hw_pulse(struct mt76x02_dev *dev, pulse->burst = mt76_rr(dev, MT_BBP(DFS, 22)); } -static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, - struct mt76x02_dfs_hw_pulse *pulse) +static bool mt76x02_dfs_check_hw_pulse(struct mt76x02_dev *dev, + struct mt76x02_dfs_hw_pulse *pulse) { bool ret = false; @@ -290,7 +289,7 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, break; if (pulse->engine == 3) { - ret = mt76x2_dfs_check_chirp(dev); + ret = mt76x02_dfs_check_chirp(dev); break; } @@ -334,7 +333,7 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, break; if (pulse->engine == 3) { - ret = mt76x2_dfs_check_chirp(dev); + ret = mt76x02_dfs_check_chirp(dev); break; } @@ -371,8 +370,8 @@ static bool mt76x2_dfs_check_hw_pulse(struct mt76x02_dev *dev, return ret; } -static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev, - struct mt76x02_dfs_event *event) +static bool mt76x02_dfs_fetch_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { u32 data; @@ -398,8 +397,8 @@ static bool mt76x2_dfs_fetch_event(struct mt76x02_dev *dev, return true; } -static bool 
mt76x2_dfs_check_event(struct mt76x02_dev *dev, - struct mt76x02_dfs_event *event) +static bool mt76x02_dfs_check_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { if (event->engine == 2) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; @@ -417,8 +416,8 @@ static bool mt76x2_dfs_check_event(struct mt76x02_dev *dev, return true; } -static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev, - struct mt76x02_dfs_event *event) +static void mt76x02_dfs_queue_event(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_event_rb *event_buff; @@ -435,9 +434,9 @@ static void mt76x2_dfs_queue_event(struct mt76x02_dev *dev, MT_DFS_EVENT_BUFLEN); } -static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev, - struct mt76x02_dfs_event *event, - u16 cur_len) +static int mt76x02_dfs_create_sequence(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event, + u16 cur_len) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_sw_detector_params *sw_params; @@ -497,7 +496,7 @@ static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev, while (j != end) { cur_event = &event_rb->data[j]; cur_pri = event->ts - cur_event->ts; - factor = mt76x2_dfs_get_multiple(cur_pri, seq.pri, + factor = mt76x02_dfs_get_multiple(cur_pri, seq.pri, sw_params->pri_margin); if (factor > 0) { seq.first_ts = cur_event->ts; @@ -509,7 +508,7 @@ static int mt76x2_dfs_create_sequence(struct mt76x02_dev *dev, if (seq.count <= cur_len) goto next; - seq_p = mt76x2_dfs_seq_pool_get(dev); + seq_p = mt76x02_dfs_seq_pool_get(dev); if (!seq_p) return -ENOMEM; @@ -522,8 +521,8 @@ next: return 0; } -static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, - struct mt76x02_dfs_event *event) +static u16 mt76x02_dfs_add_event_to_sequence(struct mt76x02_dev *dev, + struct mt76x02_dfs_event *event) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_sw_detector_params *sw_params; @@ -535,7 +534,7 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, list_for_each_entry_safe(seq, tmp_seq, &dfs_pd->sequences, head) { if (event->ts > seq->first_ts + MT_DFS_SEQUENCE_WINDOW) { list_del_init(&seq->head); - mt76x2_dfs_seq_pool_put(dev, seq); + mt76x02_dfs_seq_pool_put(dev, seq); continue; } @@ -543,8 +542,8 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, continue; pri = event->ts - seq->last_ts; - factor = mt76x2_dfs_get_multiple(pri, seq->pri, - sw_params->pri_margin); + factor = mt76x02_dfs_get_multiple(pri, seq->pri, + sw_params->pri_margin); if (factor > 0) { seq->last_ts = event->ts; seq->count++; @@ -554,7 +553,7 @@ static u16 mt76x2_dfs_add_event_to_sequence(struct mt76x02_dev *dev, return max_seq_len; } -static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev) +static bool mt76x02_dfs_check_detection(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_sequence *seq; @@ -571,34 +570,34 @@ static bool mt76x2_dfs_check_detection(struct mt76x02_dev *dev) return false; } -static void mt76x2_dfs_add_events(struct mt76x02_dev *dev) +static void mt76x02_dfs_add_events(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_event event; int i, seq_len; /* disable debug mode */ - mt76x2_dfs_set_capture_mode_ctrl(dev, false); + mt76x02_dfs_set_capture_mode_ctrl(dev, false); for (i = 0; i < MT_DFS_EVENT_LOOP; i++) { - if 
(!mt76x2_dfs_fetch_event(dev, &event)) + if (!mt76x02_dfs_fetch_event(dev, &event)) break; if (dfs_pd->last_event_ts > event.ts) - mt76x2_dfs_detector_reset(dev); + mt76x02_dfs_detector_reset(dev); dfs_pd->last_event_ts = event.ts; - if (!mt76x2_dfs_check_event(dev, &event)) + if (!mt76x02_dfs_check_event(dev, &event)) continue; - seq_len = mt76x2_dfs_add_event_to_sequence(dev, &event); - mt76x2_dfs_create_sequence(dev, &event, seq_len); + seq_len = mt76x02_dfs_add_event_to_sequence(dev, &event); + mt76x02_dfs_create_sequence(dev, &event, seq_len); - mt76x2_dfs_queue_event(dev, &event); + mt76x02_dfs_queue_event(dev, &event); } - mt76x2_dfs_set_capture_mode_ctrl(dev, true); + mt76x02_dfs_set_capture_mode_ctrl(dev, true); } -static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev) +static void mt76x02_dfs_check_event_window(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; struct mt76x02_dfs_event_rb *event_buff; @@ -621,7 +620,7 @@ static void mt76x2_dfs_check_event_window(struct mt76x02_dev *dev) } } -static void mt76x2_dfs_tasklet(unsigned long arg) +static void mt76x02_dfs_tasklet(unsigned long arg) { struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; @@ -637,16 +636,16 @@ static void mt76x2_dfs_tasklet(unsigned long arg) dfs_pd->last_sw_check = jiffies; - mt76x2_dfs_add_events(dev); - radar_detected = mt76x2_dfs_check_detection(dev); + mt76x02_dfs_add_events(dev); + radar_detected = mt76x02_dfs_check_detection(dev); if (radar_detected) { /* sw detector rx radar pattern */ ieee80211_radar_detected(dev->mt76.hw); - mt76x2_dfs_detector_reset(dev); + mt76x02_dfs_detector_reset(dev); return; } - mt76x2_dfs_check_event_window(dev); + mt76x02_dfs_check_event_window(dev); } engine_mask = mt76_rr(dev, MT_BBP(DFS, 1)); @@ -660,9 +659,9 @@ static void mt76x2_dfs_tasklet(unsigned long arg) continue; pulse.engine = i; - mt76x2_dfs_get_hw_pulse(dev, &pulse); + mt76x02_dfs_get_hw_pulse(dev, &pulse); - if (!mt76x2_dfs_check_hw_pulse(dev, &pulse)) { + if (!mt76x02_dfs_check_hw_pulse(dev, &pulse)) { dfs_pd->stats[i].hw_pulse_discarded++; continue; } @@ -670,7 +669,7 @@ static void mt76x2_dfs_tasklet(unsigned long arg) /* hw detector rx radar pattern */ dfs_pd->stats[i].hw_pattern++; ieee80211_radar_detected(dev->mt76.hw); - mt76x2_dfs_detector_reset(dev); + mt76x02_dfs_detector_reset(dev); return; } @@ -682,7 +681,7 @@ out: mt76x02_irq_enable(dev, MT_INT_GPTIMER); } -static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev) +static void mt76x02_dfs_init_sw_detector(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; @@ -708,7 +707,7 @@ static void mt76x2_dfs_init_sw_detector(struct mt76x02_dev *dev) } } -static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev) +static void mt76x02_dfs_set_bbp_params(struct mt76x02_dev *dev) { const struct mt76x02_radar_specs *radar_specs; u8 i, shift; @@ -800,10 +799,10 @@ static void mt76x2_dfs_set_bbp_params(struct mt76x02_dev *dev) /* enable detection*/ mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16); - mt76_wr(dev, 0x212c, 0x0c350001); + mt76_wr(dev, MT_BBP(IBI, 11), 0x0c350001); } -void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev) +void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev) { u32 agc_r8, agc_r4, val_r8, val_r4, dfs_r31; @@ -821,19 +820,27 @@ void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev) dfs_r31 = (dfs_r31 << 16) | 0x00000307; mt76_wr(dev, MT_BBP(DFS, 31), dfs_r31); - mt76_wr(dev, 
MT_BBP(DFS, 32), 0x00040071); + if (is_mt76x2(dev)) { + mt76_wr(dev, MT_BBP(DFS, 32), 0x00040071); + } else { + /* disable hw detector */ + mt76_wr(dev, MT_BBP(DFS, 0), 0); + /* enable hw detector */ + mt76_wr(dev, MT_BBP(DFS, 0), MT_DFS_CH_EN << 16); + } } +EXPORT_SYMBOL_GPL(mt76x02_phy_dfs_adjust_agc); -void mt76x2_dfs_init_params(struct mt76x02_dev *dev) +void mt76x02_dfs_init_params(struct mt76x02_dev *dev) { struct cfg80211_chan_def *chandef = &dev->mt76.chandef; if ((chandef->chan->flags & IEEE80211_CHAN_RADAR) && dev->dfs_pd.region != NL80211_DFS_UNSET) { - mt76x2_dfs_init_sw_detector(dev); - mt76x2_dfs_set_bbp_params(dev); + mt76x02_dfs_init_sw_detector(dev); + mt76x02_dfs_set_bbp_params(dev); /* enable debug mode */ - mt76x2_dfs_set_capture_mode_ctrl(dev, true); + mt76x02_dfs_set_capture_mode_ctrl(dev, true); mt76x02_irq_enable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, @@ -843,15 +850,20 @@ void mt76x2_dfs_init_params(struct mt76x02_dev *dev) mt76_wr(dev, MT_BBP(DFS, 0), 0); /* clear detector status */ mt76_wr(dev, MT_BBP(DFS, 1), 0xf); - mt76_wr(dev, 0x212c, 0); + if (mt76_chip(&dev->mt76) == 0x7610 || + mt76_chip(&dev->mt76) == 0x7630) + mt76_wr(dev, MT_BBP(IBI, 11), 0xfde8081); + else + mt76_wr(dev, MT_BBP(IBI, 11), 0); mt76x02_irq_disable(dev, MT_INT_GPTIMER); mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_GP_TIMER_EN, 0); } } +EXPORT_SYMBOL_GPL(mt76x02_dfs_init_params); -void mt76x2_dfs_init_detector(struct mt76x02_dev *dev) +void mt76x02_dfs_init_detector(struct mt76x02_dev *dev) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; @@ -859,20 +871,29 @@ void mt76x2_dfs_init_detector(struct mt76x02_dev *dev) INIT_LIST_HEAD(&dfs_pd->seq_pool); dfs_pd->region = NL80211_DFS_UNSET; dfs_pd->last_sw_check = jiffies; - tasklet_init(&dfs_pd->dfs_tasklet, mt76x2_dfs_tasklet, + tasklet_init(&dfs_pd->dfs_tasklet, mt76x02_dfs_tasklet, (unsigned long)dev); } -void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, - enum nl80211_dfs_regions region) +static void +mt76x02_dfs_set_domain(struct mt76x02_dev *dev, + enum nl80211_dfs_regions region) { struct mt76x02_dfs_pattern_detector *dfs_pd = &dev->dfs_pd; if (dfs_pd->region != region) { tasklet_disable(&dfs_pd->dfs_tasklet); dfs_pd->region = region; - mt76x2_dfs_init_params(dev); + mt76x02_dfs_init_params(dev); tasklet_enable(&dfs_pd->dfs_tasklet); } } +void mt76x02_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request) +{ + struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); + struct mt76x02_dev *dev = hw->priv; + + mt76x02_dfs_set_domain(dev, request->dfs_region); +} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h index 7e177c934592..70b394e17340 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_dfs.h @@ -137,4 +137,9 @@ struct mt76x02_dfs_pattern_detector { struct tasklet_struct dfs_tasklet; }; +void mt76x02_dfs_init_params(struct mt76x02_dev *dev); +void mt76x02_dfs_init_detector(struct mt76x02_dev *dev); +void mt76x02_regd_notifier(struct wiphy *wiphy, + struct regulatory_request *request); +void mt76x02_phy_dfs_adjust_agc(struct mt76x02_dev *dev); #endif /* __MT76x02_DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c index 9390de2a323e..07f0496d828a 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.c @@ -53,6 +53,18 
@@ mt76x02_efuse_read(struct mt76x02_dev *dev, u16 addr, u8 *data, return 0; } +int mt76x02_eeprom_copy(struct mt76x02_dev *dev, + enum mt76x02_eeprom_field field, + void *dest, int len) +{ + if (field + len > dev->mt76.eeprom.size) + return -1; + + memcpy(dest, dev->mt76.eeprom.data + field, len); + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_eeprom_copy); + int mt76x02_get_efuse_data(struct mt76x02_dev *dev, u16 base, void *buf, int len, enum mt76x02_eeprom_modes mode) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h index b3ec74835d10..e3442bc4e0a4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_eeprom.h @@ -25,6 +25,7 @@ enum mt76x02_eeprom_field { MT_EE_VERSION = 0x002, MT_EE_MAC_ADDR = 0x004, MT_EE_PCI_ID = 0x00A, + MT_EE_ANTENNA = 0x022, MT_EE_NIC_CONF_0 = 0x034, MT_EE_NIC_CONF_1 = 0x036, MT_EE_COUNTRY_REGION_5GHZ = 0x038, @@ -55,6 +56,7 @@ enum mt76x02_eeprom_field { #define MT_TX_POWER_GROUP_SIZE_5G 5 #define MT_TX_POWER_GROUPS_5G 6 MT_EE_TX_POWER_0_START_5G = 0x062, + MT_EE_TSSI_SLOPE_2G = 0x06e, MT_EE_TX_POWER_0_GRP3_TX_POWER_DELTA = 0x074, MT_EE_TX_POWER_0_GRP4_TSSI_SLOPE = 0x076, @@ -85,6 +87,7 @@ enum mt76x02_eeprom_field { MT_EE_TSSI_BOUND5 = 0x0dc, MT_EE_TX_POWER_BYRATE_BASE = 0x0de, + MT_EE_TSSI_SLOPE_5G = 0x0f0, MT_EE_RF_TEMP_COMP_SLOPE_5G = 0x0f2, MT_EE_RF_TEMP_COMP_SLOPE_2G = 0x0f4, @@ -104,6 +107,8 @@ enum mt76x02_eeprom_field { __MT_EE_MAX }; +#define MT_EE_ANTENNA_DUAL BIT(15) + #define MT_EE_NIC_CONF_0_RX_PATH GENMASK(3, 0) #define MT_EE_NIC_CONF_0_TX_PATH GENMASK(7, 4) #define MT_EE_NIC_CONF_0_PA_TYPE GENMASK(9, 8) @@ -118,12 +123,9 @@ enum mt76x02_eeprom_field { #define MT_EE_NIC_CONF_1_LNA_EXT_5G BIT(3) #define MT_EE_NIC_CONF_1_TX_ALC_EN BIT(13) -#define MT_EE_NIC_CONF_2_RX_STREAM GENMASK(3, 0) -#define MT_EE_NIC_CONF_2_TX_STREAM GENMASK(7, 4) -#define MT_EE_NIC_CONF_2_HW_ANTDIV BIT(8) +#define MT_EE_NIC_CONF_2_ANT_OPT BIT(3) +#define MT_EE_NIC_CONF_2_ANT_DIV BIT(4) #define MT_EE_NIC_CONF_2_XTAL_OPTION GENMASK(10, 9) -#define MT_EE_NIC_CONF_2_TEMP_DISABLE BIT(11) -#define MT_EE_NIC_CONF_2_COEX_METHOD GENMASK(15, 13) #define MT_EFUSE_USAGE_MAP_SIZE (MT_EE_USAGE_MAP_END - \ MT_EE_USAGE_MAP_START + 1) @@ -188,5 +190,8 @@ u8 mt76x02_get_lna_gain(struct mt76x02_dev *dev, s8 *lna_2g, s8 *lna_5g, struct ieee80211_channel *chan); void mt76x02_eeprom_parse_hw_cap(struct mt76x02_dev *dev); +int mt76x02_eeprom_copy(struct mt76x02_dev *dev, + enum mt76x02_eeprom_field field, + void *dest, int len); #endif /* __MT76x02_EEPROM_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c index 10578e4cb269..c08bf371e527 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.c @@ -18,7 +18,7 @@ #include "mt76x02.h" #include "mt76x02_trace.h" -enum mt76x02_cipher_type +static enum mt76x02_cipher_type mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) { memset(key_data, 0, 32); @@ -43,7 +43,6 @@ mt76x02_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) return MT_CIPHER_NONE; } } -EXPORT_SYMBOL_GPL(mt76x02_mac_get_key_info); int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key) @@ -95,7 +94,6 @@ int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, return 0; } -EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_key); void 
mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, u8 vif_idx, u8 *mac) @@ -108,9 +106,6 @@ void mt76x02_mac_wcid_setup(struct mt76x02_dev *dev, u8 idx, mt76_wr(dev, MT_WCID_ATTR(idx), attr); - mt76_wr(dev, MT_WCID_TX_RATE(idx), 0); - mt76_wr(dev, MT_WCID_TX_RATE(idx) + 4, 0); - if (idx >= 128) return; @@ -130,31 +125,6 @@ void mt76x02_mac_wcid_set_drop(struct mt76x02_dev *dev, u8 idx, bool drop) if ((val & bit) != (bit * drop)) mt76_wr(dev, MT_WCID_DROP(idx), (val & ~bit) | (bit * drop)); } -EXPORT_SYMBOL_GPL(mt76x02_mac_wcid_set_drop); - -void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq) -{ - struct mt76_txq *mtxq; - - if (!txq) - return; - - mtxq = (struct mt76_txq *) txq->drv_priv; - if (txq->sta) { - struct mt76x02_sta *sta; - - sta = (struct mt76x02_sta *) txq->sta->drv_priv; - mtxq->wcid = &sta->wcid; - } else { - struct mt76x02_vif *mvif; - - mvif = (struct mt76x02_vif *) txq->vif->drv_priv; - mtxq->wcid = &mvif->group_wcid; - } - - mt76_txq_init(&dev->mt76, txq); -} -EXPORT_SYMBOL_GPL(mt76x02_txq_init); static __le16 mt76x02_mac_tx_rate_val(struct mt76x02_dev *dev, @@ -216,6 +186,14 @@ void mt76x02_mac_wcid_set_rate(struct mt76x02_dev *dev, struct mt76_wcid *wcid, spin_unlock_bh(&dev->mt76.lock); } +void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable) +{ + if (enable) + mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); + else + mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); +} + bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, struct mt76x02_tx_status *stat) { @@ -237,9 +215,10 @@ bool mt76x02_mac_load_tx_status(struct mt76x02_dev *dev, stat->retry = FIELD_GET(MT_TX_STAT_FIFO_EXT_RETRY, stat2); stat->pktid = FIELD_GET(MT_TX_STAT_FIFO_EXT_PKTID, stat2); + trace_mac_txstat_fetch(dev, stat); + return true; } -EXPORT_SYMBOL_GPL(mt76x02_mac_load_tx_status); static int mt76x02_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate, @@ -319,8 +298,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, else txwi->wcid = 0xff; - txwi->pktid = 1; - if (wcid && wcid->sw_iv && key) { u64 pn = atomic64_inc_return(&key->tx_pn); ccmp_pn[0] = pn; @@ -366,8 +343,6 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) - txwi->pktid |= MT_TXWI_PKTID_PROBE; if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { u8 ba_size = IEEE80211_MIN_AMPDU_BUF; @@ -420,9 +395,6 @@ mt76x02_mac_fill_tx_status(struct mt76x02_dev *dev, info->status.ampdu_len = n_frames; info->status.ampdu_ack_len = st->success ? 
n_frames : 0; - if (st->pktid & MT_TXWI_PKTID_PROBE) - info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; - if (st->aggr) info->flags |= IEEE80211_TX_CTL_AMPDU | IEEE80211_TX_STAT_AMPDU; @@ -437,23 +409,40 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, struct mt76x02_tx_status *stat, u8 *update) { struct ieee80211_tx_info info = {}; - struct ieee80211_sta *sta = NULL; + struct ieee80211_tx_status status = { + .info = &info + }; struct mt76_wcid *wcid = NULL; struct mt76x02_sta *msta = NULL; + struct mt76_dev *mdev = &dev->mt76; + struct sk_buff_head list; + + if (stat->pktid == MT_PACKET_ID_NO_ACK) + return; rcu_read_lock(); + mt76_tx_status_lock(mdev, &list); + if (stat->wcid < ARRAY_SIZE(dev->mt76.wcid)) wcid = rcu_dereference(dev->mt76.wcid[stat->wcid]); - if (wcid) { + if (wcid && wcid->sta) { void *priv; priv = msta = container_of(wcid, struct mt76x02_sta, wcid); - sta = container_of(priv, struct ieee80211_sta, - drv_priv); + status.sta = container_of(priv, struct ieee80211_sta, + drv_priv); + } + + if (wcid) { + if (stat->pktid) + status.skb = mt76_tx_status_skb_get(mdev, wcid, + stat->pktid, &list); + if (status.skb) + status.info = IEEE80211_SKB_CB(status.skb); } - if (msta && stat->aggr) { + if (msta && stat->aggr && !status.skb) { u32 stat_val, stat_cache; stat_val = stat->rate; @@ -467,25 +456,28 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, goto out; } - mt76x02_mac_fill_tx_status(dev, &info, &msta->status, + mt76x02_mac_fill_tx_status(dev, status.info, &msta->status, msta->n_frames); msta->status = *stat; msta->n_frames = 1; *update = 0; } else { - mt76x02_mac_fill_tx_status(dev, &info, stat, 1); + mt76x02_mac_fill_tx_status(dev, status.info, stat, 1); *update = 1; } - ieee80211_tx_status_noskb(dev->mt76.hw, sta, &info); + if (status.skb) + mt76_tx_status_skb_done(mdev, status.skb, &list); + else + ieee80211_tx_status_ext(mt76_hw(dev), &status); out: + mt76_tx_status_unlock(mdev, &list); rcu_read_unlock(); } -EXPORT_SYMBOL_GPL(mt76x02_send_tx_status); -int +static int mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) { u8 idx = FIELD_GET(MT_RXWI_RATE_INDEX, rate); @@ -551,7 +543,6 @@ mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate) return 0; } -EXPORT_SYMBOL_GPL(mt76x02_mac_process_rate); void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr) { @@ -695,8 +686,6 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq) if (!ret) break; - trace_mac_txstat_fetch(dev, &stat); - if (!irq) { mt76x02_send_tx_status(dev, &stat, &update); continue; @@ -705,33 +694,230 @@ void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq) kfifo_put(&dev->txstatus_fifo, stat); } } -EXPORT_SYMBOL_GPL(mt76x02_mac_poll_tx_status); -static void -mt76x02_mac_queue_txdone(struct mt76x02_dev *dev, struct sk_buff *skb, - void *txwi_ptr) +void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, + struct mt76_queue_entry *e, bool flush) { - struct mt76x02_tx_info *txi = mt76x02_skb_tx_info(skb); - struct mt76x02_txwi *txwi = txwi_ptr; + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct mt76x02_txwi *txwi; + + if (!e->txwi) { + dev_kfree_skb_any(e->skb); + return; + } mt76x02_mac_poll_tx_status(dev, false); - txi->tries = 0; - txi->jiffies = jiffies; - txi->wcid = txwi->wcid; - txi->pktid = txwi->pktid; + txwi = (struct mt76x02_txwi *) &e->txwi->txwi; trace_mac_txdone_add(dev, txwi->wcid, txwi->pktid); - mt76x02_tx_complete(&dev->mt76, skb); + + mt76_tx_complete_skb(mdev, e->skb); } 
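The hunk above is the heart of this refactor: instead of reporting status with ieee80211_tx_status_noskb(), the driver now stores the transmitted skb keyed by the TXWI pktid (mt76_tx_status_skb_add() at transmit time) and looks it up again when a TX_STAT_FIFO report arrives, bailing out early for MT_PACKET_ID_NO_ACK frames. A minimal standalone sketch of that id-to-frame matching; the names here (status_ring, status_add, status_get) are invented for the example, not the real mt76 helpers:

/* Toy model of packet-id based TX status matching.  PKT_ID_NO_ACK
 * mirrors MT_PACKET_ID_NO_ACK: id 0 means "no report expected".
 */
#include <stdio.h>

#define PKT_ID_NO_ACK 0
#define PKT_ID_MAX    128

struct status_ring {
    void *slot[PKT_ID_MAX];   /* frames waiting for a status report */
    unsigned int next;        /* next candidate id; 0 stays reserved */
};

/* On transmit: stash the frame, return the id to write into the TXWI. */
static unsigned int status_add(struct status_ring *r, void *skb)
{
    unsigned int id = r->next;

    if (++r->next >= PKT_ID_MAX)
        r->next = 1;
    r->slot[id] = skb;
    return id;
}

/* On a hardware status report: recover the matching frame, if any. */
static void *status_get(struct status_ring *r, unsigned int id)
{
    void *skb;

    if (id == PKT_ID_NO_ACK || id >= PKT_ID_MAX)
        return NULL;          /* fire-and-forget frame, nothing stored */
    skb = r->slot[id];
    r->slot[id] = NULL;
    return skb;
}

int main(void)
{
    struct status_ring ring = { .next = 1 };
    char frame[] = "data frame";
    unsigned int id = status_add(&ring, frame);

    printf("pktid %u -> %s\n", id, (char *)status_get(&ring, id));
    return 0;
}

Keying the lookup on pktid is what lets the patch delete the per-skb struct mt76x02_tx_info and the MT_TXWI_PKTID_PROBE flag: the recovered skb already carries its ieee80211_tx_info, so nothing extra has to ride along in the frame.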
+EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); -void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, - struct mt76_queue_entry *e, bool flush) +void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) +{ + u32 data = 0; + + if (val != ~0) + data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) | + MT_PROT_CFG_RTS_THRESH; + + mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val); + + mt76_rmw(dev, MT_CCK_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_OFDM_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_MM20_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_MM40_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_GF20_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_GF40_PROT_CFG, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_TX_PROT_CFG6, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_TX_PROT_CFG7, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); + mt76_rmw(dev, MT_TX_PROT_CFG8, + MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); +} + +void mt76x02_update_channel(struct mt76_dev *mdev) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct mt76_channel_state *state; + u32 active, busy; + + state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan); + + busy = mt76_rr(dev, MT_CH_BUSY); + active = busy + mt76_rr(dev, MT_CH_IDLE); + + spin_lock_bh(&dev->mt76.cc_lock); + state->cc_busy += busy; + state->cc_active += active; + spin_unlock_bh(&dev->mt76.cc_lock); +} +EXPORT_SYMBOL_GPL(mt76x02_update_channel); + +static void mt76x02_check_mac_err(struct mt76x02_dev *dev) +{ + u32 val = mt76_rr(dev, 0x10f4); + + if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5)))) + return; + + dev_err(dev->mt76.dev, "mac specific condition occurred\n"); + + mt76_set(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); + udelay(10); + mt76_clear(dev, MT_MAC_SYS_CTRL, + MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); +} + +void mt76x02_mac_work(struct work_struct *work) +{ + struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, + mac_work.work); + int i, idx; + + mt76x02_update_channel(&dev->mt76); + for (i = 0, idx = 0; i < 16; i++) { + u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i)); - if (e->txwi) - mt76x02_mac_queue_txdone(dev, e->skb, &e->txwi->txwi); + dev->aggr_stats[idx++] += val & 0xffff; + dev->aggr_stats[idx++] += val >> 16; + } + + /* XXX: check beacon stuck for ap mode */ + if (!dev->beacon_mask) + mt76x02_check_mac_err(dev); + + mt76_tx_status_check(&dev->mt76, NULL, false); + + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, + MT_CALIBRATE_INTERVAL); +} + +void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr) +{ + idx &= 7; + mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr)); + mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR, + get_unaligned_le16(addr + 4)); +} + +static int +mt76x02_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb) +{ + int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; + struct mt76x02_txwi txwi; + + if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi))) + return -ENOSPC; + + mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); + + mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); + offset += sizeof(txwi); + + mt76_wr_copy(dev, offset, skb->data, skb->len); + return 0; +} + +static 
int +__mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, + struct sk_buff *skb) +{ + int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; + int beacon_addr = mt76x02_beacon_offsets[bcn_idx]; + int ret = 0; + int i; + + /* Prevent corrupt transmissions during update */ + mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx)); + + if (skb) { + ret = mt76x02_write_beacon(dev, beacon_addr, skb); + if (!ret) + dev->beacon_data_mask |= BIT(bcn_idx); + } else { + dev->beacon_data_mask &= ~BIT(bcn_idx); + for (i = 0; i < beacon_len; i += 4) + mt76_wr(dev, beacon_addr + i, 0); + } + + mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask); + + return ret; +} + +int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, + struct sk_buff *skb) +{ + bool force_update = false; + int bcn_idx = 0; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) { + if (vif_idx == i) { + force_update = !!dev->beacons[i] ^ !!skb; + + if (dev->beacons[i]) + dev_kfree_skb(dev->beacons[i]); + + dev->beacons[i] = skb; + __mt76x02_mac_set_beacon(dev, bcn_idx, skb); + } else if (force_update && dev->beacons[i]) { + __mt76x02_mac_set_beacon(dev, bcn_idx, + dev->beacons[i]); + } + + bcn_idx += !!dev->beacons[i]; + } + + for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) { + if (!(dev->beacon_data_mask & BIT(i))) + break; + + __mt76x02_mac_set_beacon(dev, i, NULL); + } + + mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, + bcn_idx - 1); + return 0; +} + +void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, + u8 vif_idx, bool val) +{ + u8 old_mask = dev->beacon_mask; + bool en; + u32 reg; + + if (val) { + dev->beacon_mask |= BIT(vif_idx); + } else { + dev->beacon_mask &= ~BIT(vif_idx); + mt76x02_mac_set_beacon(dev, vif_idx, NULL); + } + + if (!!old_mask == !!dev->beacon_mask) + return; + + en = dev->beacon_mask; + + mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); + reg = MT_BEACON_TIME_CFG_BEACON_TX | + MT_BEACON_TIME_CFG_TBTT_EN | + MT_BEACON_TIME_CFG_TIMER_EN; + mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); + + if (en) + mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); else - dev_kfree_skb_any(e->skb); + mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); } -EXPORT_SYMBOL_GPL(mt76x02_tx_complete_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h index d99c18743969..4e597004c445 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mac.h @@ -37,18 +37,8 @@ struct mt76x02_tx_status { #define MT_MAX_VIFS 8 struct mt76x02_vif { + struct mt76_wcid group_wcid; /* must be first */ u8 idx; - - struct mt76_wcid group_wcid; -}; - -struct mt76x02_tx_info { - unsigned long jiffies; - u8 tries; - - u8 wcid; - u8 pktid; - u8 retry; }; DECLARE_EWMA(signal, 10, 8); @@ -153,8 +143,6 @@ enum mt76x2_phy_bandwidth { #define MT_TXWI_ACK_CTL_NSEQ BIT(1) #define MT_TXWI_ACK_CTL_BA_WINDOW GENMASK(7, 2) -#define MT_TXWI_PKTID_PROBE BIT(7) - struct mt76x02_txwi { __le16 flags; __le16 rate; @@ -190,18 +178,7 @@ static inline bool mt76x02_wait_for_mac(struct mt76_dev *dev) return false; } -static inline struct mt76x02_tx_info * -mt76x02_skb_tx_info(struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - return (void *)info->status.status_driver_data; -} - -void mt76x02_txq_init(struct mt76x02_dev *dev, struct ieee80211_txq *txq); -enum mt76x02_cipher_type -mt76x02_mac_get_key_info(struct 
ieee80211_key_conf *key, u8 *key_data); - +void mt76x02_mac_set_short_preamble(struct mt76x02_dev *dev, bool enable); int mt76x02_mac_shared_key_setup(struct mt76x02_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key); int mt76x02_mac_wcid_set_key(struct mt76x02_dev *dev, u8 idx, @@ -217,8 +194,7 @@ void mt76x02_send_tx_status(struct mt76x02_dev *dev, struct mt76x02_tx_status *stat, u8 *update); int mt76x02_mac_process_rx(struct mt76x02_dev *dev, struct sk_buff *skb, void *rxi); -int -mt76x02_mac_process_rate(struct mt76_rx_status *status, u16 rate); +void mt76x02_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val); void mt76x02_mac_setaddr(struct mt76x02_dev *dev, u8 *addr); void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, struct sk_buff *skb, struct mt76_wcid *wcid, @@ -226,4 +202,12 @@ void mt76x02_mac_write_txwi(struct mt76x02_dev *dev, struct mt76x02_txwi *txwi, void mt76x02_mac_poll_tx_status(struct mt76x02_dev *dev, bool irq); void mt76x02_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, struct mt76_queue_entry *e, bool flush); +void mt76x02_update_channel(struct mt76_dev *mdev); +void mt76x02_mac_work(struct work_struct *work); + +void mt76x02_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr); +int mt76x02_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, + struct sk_buff *skb); +void mt76x02_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, + bool val); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c index 1b853bb723fb..b7f4edb729e3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.c @@ -21,7 +21,7 @@ #include "mt76x02_mcu.h" -struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) +static struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) { struct sk_buff *skb; @@ -32,7 +32,6 @@ struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len) return skb; } -EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_alloc); static struct sk_buff * mt76x02_mcu_get_response(struct mt76x02_dev *dev, unsigned long expires) @@ -80,16 +79,18 @@ mt76x02_tx_queue_mcu(struct mt76x02_dev *dev, enum mt76_txq_id qid, return 0; } -int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, - int cmd, bool wait_resp) +int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, + int len, bool wait_resp) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); unsigned long expires = jiffies + HZ; + struct sk_buff *skb; int ret; u8 seq; + skb = mt76x02_mcu_msg_alloc(data, len); if (!skb) - return -EINVAL; + return -ENOMEM; mutex_lock(&mdev->mmio.mcu.mutex); @@ -131,11 +132,9 @@ out: } EXPORT_SYMBOL_GPL(mt76x02_mcu_msg_send); -int mt76x02_mcu_function_select(struct mt76x02_dev *dev, - enum mcu_function func, - u32 val, bool wait_resp) +int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, + u32 val) { - struct sk_buff *skb; struct { __le32 id; __le32 value; @@ -143,16 +142,17 @@ int mt76x02_mcu_function_select(struct mt76x02_dev *dev, .id = cpu_to_le32(func), .value = cpu_to_le32(val), }; + bool wait = false; - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_FUN_SET_OP, wait_resp); + if (func != Q_SELECT) + wait = true; + + return mt76_mcu_send_msg(dev, CMD_FUN_SET_OP, &msg, sizeof(msg), wait); } EXPORT_SYMBOL_GPL(mt76x02_mcu_function_select); -int 
mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, - bool wait_resp) +int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on) { - struct sk_buff *skb; struct { __le32 mode; __le32 level; @@ -161,15 +161,12 @@ int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, .level = cpu_to_le32(0), }; - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_POWER_SAVING_OP, wait_resp); + return mt76_mcu_send_msg(dev, CMD_POWER_SAVING_OP, &msg, sizeof(msg), false); } EXPORT_SYMBOL_GPL(mt76x02_mcu_set_radio_state); -int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, - u32 param, bool wait) +int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param) { - struct sk_buff *skb; struct { __le32 id; __le32 value; @@ -177,17 +174,18 @@ int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, .id = cpu_to_le32(type), .value = cpu_to_le32(param), }; + bool is_mt76x2e = mt76_is_mmio(dev) && is_mt76x2(dev); int ret; - if (wait) + if (is_mt76x2e) mt76_rmw(dev, MT_MCU_COM_REG0, BIT(31), 0); - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - ret = mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); + ret = mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg), + true); if (ret) return ret; - if (wait && + if (is_mt76x2e && WARN_ON(!mt76_poll_msec(dev, MT_MCU_COM_REG0, BIT(31), BIT(31), 100))) return -ETIMEDOUT; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h index 2d8fd2514570..7e4004120102 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mcu.h @@ -97,16 +97,12 @@ struct mt76x02_patch_header { }; int mt76x02_mcu_cleanup(struct mt76x02_dev *dev); -int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, - u32 param, bool wait); -struct sk_buff *mt76x02_mcu_msg_alloc(const void *data, int len); -int mt76x02_mcu_msg_send(struct mt76_dev *mdev, struct sk_buff *skb, - int cmd, bool wait_resp); -int mt76x02_mcu_function_select(struct mt76x02_dev *dev, - enum mcu_function func, - u32 val, bool wait_resp); -int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on, - bool wait_resp); +int mt76x02_mcu_calibrate(struct mt76x02_dev *dev, int type, u32 param); +int mt76x02_mcu_msg_send(struct mt76_dev *mdev, int cmd, const void *data, + int len, bool wait_resp); +int mt76x02_mcu_function_select(struct mt76x02_dev *dev, enum mcu_function func, + u32 val); +int mt76x02_mcu_set_radio_state(struct mt76x02_dev *dev, bool on); void mt76x02_set_ethtool_fwver(struct mt76x02_dev *dev, const struct mt76x02_fw_header *h); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c index 39f092034240..66315410aebe 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_mmio.c @@ -21,6 +21,130 @@ #include "mt76x02.h" #include "mt76x02_trace.h" +struct beacon_bc_data { + struct mt76x02_dev *dev; + struct sk_buff_head q; + struct sk_buff *tail[8]; +}; + +static void +mt76x02_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct mt76x02_dev *dev = (struct mt76x02_dev *)priv; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; + struct sk_buff *skb = NULL; + + if (!(dev->beacon_mask & BIT(mvif->idx))) + return; + + skb = ieee80211_beacon_get(mt76_hw(dev), vif); + if (!skb) + return; + + mt76x02_mac_set_beacon(dev, mvif->idx, skb); +} + +static void 
+mt76x02_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) +{ + struct beacon_bc_data *data = priv; + struct mt76x02_dev *dev = data->dev; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; + struct ieee80211_tx_info *info; + struct sk_buff *skb; + + if (!(dev->beacon_mask & BIT(mvif->idx))) + return; + + skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif); + if (!skb) + return; + + info = IEEE80211_SKB_CB(skb); + info->control.vif = vif; + info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; + mt76_skb_set_moredata(skb, true); + __skb_queue_tail(&data->q, skb); + data->tail[mvif->idx] = skb; +} + +static void +mt76x02_resync_beacon_timer(struct mt76x02_dev *dev) +{ + u32 timer_val = dev->beacon_int << 4; + + dev->tbtt_count++; + + /* + * Beacon timer drifts by 1us every tick, the timer is configured + * in 1/16 TU (64us) units. + */ + if (dev->tbtt_count < 62) + return; + + if (dev->tbtt_count >= 64) { + dev->tbtt_count = 0; + return; + } + + /* + * The updated beacon interval takes effect after two TBTT, because + * at this point the original interval has already been loaded into + * the next TBTT_TIMER value + */ + if (dev->tbtt_count == 62) + timer_val -= 1; + + mt76_rmw_field(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_INTVAL, timer_val); +} + +static void mt76x02_pre_tbtt_tasklet(unsigned long arg) +{ + struct mt76x02_dev *dev = (struct mt76x02_dev *)arg; + struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD]; + struct beacon_bc_data data = {}; + struct sk_buff *skb; + int i, nframes; + + mt76x02_resync_beacon_timer(dev); + + data.dev = dev; + __skb_queue_head_init(&data.q); + + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76x02_update_beacon_iter, dev); + + do { + nframes = skb_queue_len(&data.q); + ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), + IEEE80211_IFACE_ITER_RESUME_ALL, + mt76x02_add_buffered_bc, &data); + } while (nframes != skb_queue_len(&data.q)); + + if (!nframes) + return; + + for (i = 0; i < ARRAY_SIZE(data.tail); i++) { + if (!data.tail[i]) + continue; + + mt76_skb_set_moredata(data.tail[i], false); + } + + spin_lock_bh(&q->lock); + while ((skb = __skb_dequeue(&data.q)) != NULL) { + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct ieee80211_vif *vif = info->control.vif; + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; + + mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, + NULL); + } + spin_unlock_bh(&q->lock); +} + static int mt76x02_init_tx_queue(struct mt76x02_dev *dev, struct mt76_queue *q, int idx, int n_desc) @@ -98,6 +222,9 @@ int mt76x02_dma_init(struct mt76x02_dev *dev) return -ENOMEM; tasklet_init(&dev->tx_tasklet, mt76x02_tx_tasklet, (unsigned long) dev); + tasklet_init(&dev->pre_tbtt_tasklet, mt76x02_pre_tbtt_tasklet, + (unsigned long)dev); + kfifo_init(&dev->txstatus_fifo, status_fifo, fifo_size); mt76_dma_attach(&dev->mt76); @@ -225,7 +352,6 @@ static void mt76x02_dma_enable(struct mt76x02_dev *dev) mt76_clear(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_WRITEBACK_DONE); } -EXPORT_SYMBOL_GPL(mt76x02_dma_enable); void mt76x02_dma_cleanup(struct mt76x02_dev *dev) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c index 0f1d7b5c9f68..977a8e7e26df 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_phy.c @@ -254,5 +254,6 @@ void mt76x02_init_agc_gain(struct mt76x02_dev *dev) memcpy(dev->cal.agc_gain_cur, 
dev->cal.agc_gain_init, sizeof(dev->cal.agc_gain_cur)); dev->cal.low_gain = -1; + dev->cal.gain_init_done = true; } EXPORT_SYMBOL_GPL(mt76x02_init_agc_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c index d3de08872d6e..4598cb2cc3ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_txrx.c @@ -22,6 +22,7 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mt76x02_dev *dev = hw->priv; struct ieee80211_vif *vif = info->control.vif; @@ -33,7 +34,8 @@ void mt76x02_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, msta = (struct mt76x02_sta *)control->sta->drv_priv; wcid = &msta->wcid; /* sw encrypted frames */ - if (!info->control.hw_key && wcid->hw_key_idx != 0xff) + if (!info->control.hw_key && wcid->hw_key_idx != 0xff && + ieee80211_has_protected(hdr->frame_control)) control->sta = NULL; } @@ -110,7 +112,6 @@ s8 mt76x02_tx_get_max_txpwr_adj(struct mt76x02_dev *dev, return max_txpwr; } -EXPORT_SYMBOL_GPL(mt76x02_tx_get_max_txpwr_adj); s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj) { @@ -125,7 +126,6 @@ s8 mt76x02_tx_get_txpwr_adj(struct mt76x02_dev *dev, s8 txpwr, s8 max_txpwr_adj) else return (txpwr < -16) ? 8 : (txpwr + 32) / 2; } -EXPORT_SYMBOL_GPL(mt76x02_tx_get_txpwr_adj); void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr) { @@ -140,21 +140,6 @@ void mt76x02_tx_set_txpwr_auto(struct mt76x02_dev *dev, s8 txpwr) } EXPORT_SYMBOL_GPL(mt76x02_tx_set_txpwr_auto); -void mt76x02_tx_complete(struct mt76_dev *dev, struct sk_buff *skb) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - - if (info->flags & IEEE80211_TX_CTL_AMPDU) { - ieee80211_free_txskb(dev->hw, skb); - } else { - ieee80211_tx_info_clear_status(info); - info->status.rates[0].idx = -1; - info->flags |= IEEE80211_TX_STAT_ACK; - ieee80211_tx_status(dev->hw, skb); - } -} -EXPORT_SYMBOL_GPL(mt76x02_tx_complete); - bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); @@ -169,14 +154,15 @@ bool mt76x02_tx_status_data(struct mt76_dev *mdev, u8 *update) } EXPORT_SYMBOL_GPL(mt76x02_tx_status_data); -int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, +int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi_ptr, struct sk_buff *skb, struct mt76_queue *q, struct mt76_wcid *wcid, struct ieee80211_sta *sta, u32 *tx_info) { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76x02_txwi *txwi = txwi_ptr; int qsel = MT_QSEL_EDCA; + int pid; int ret; if (q == &dev->mt76.q_tx[MT_TXQ_PSD] && wcid && wcid->idx < 128) @@ -184,11 +170,14 @@ int mt76x02_tx_prepare_skb(struct mt76_dev *mdev, void *txwi, mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, skb->len); + pid = mt76_tx_status_skb_add(mdev, wcid, skb); + txwi->pktid = pid; + ret = mt76x02_insert_hdr_pad(skb); if (ret < 0) return ret; - if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) + if (pid && pid != MT_PACKET_ID_NO_ACK) qsel = MT_QSEL_MGMT; *tx_info = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c index 
dc2226c722dd..81970cf777c0 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_core.c @@ -30,7 +30,7 @@ void mt76x02u_tx_complete_skb(struct mt76_dev *mdev, struct mt76_queue *q, struct mt76_queue_entry *e, bool flush) { mt76x02u_remove_dma_hdr(e->skb); - mt76x02_tx_complete(mdev, e->skb); + mt76_tx_complete_skb(mdev, e->skb); } EXPORT_SYMBOL_GPL(mt76x02u_tx_complete_skb); @@ -67,27 +67,6 @@ int mt76x02u_skb_dma_info(struct sk_buff *skb, int port, u32 flags) return 0; } -static int -mt76x02u_set_txinfo(struct sk_buff *skb, struct mt76_wcid *wcid, u8 ep) -{ - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - enum mt76_qsel qsel; - u32 flags; - - if ((info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) || - ep == MT_EP_OUT_HCCA) - qsel = MT_QSEL_MGMT; - else - qsel = MT_QSEL_EDCA; - - flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | - MT_TXD_INFO_80211; - if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) - flags |= MT_TXD_INFO_WIV; - - return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags); -} - int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, struct sk_buff *skb, struct mt76_queue *q, struct mt76_wcid *wcid, struct ieee80211_sta *sta, @@ -95,13 +74,30 @@ int mt76x02u_tx_prepare_skb(struct mt76_dev *mdev, void *data, { struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_txwi *txwi; + enum mt76_qsel qsel; int len = skb->len; + u32 flags; + int pid; mt76x02_insert_hdr_pad(skb); txwi = skb_push(skb, sizeof(struct mt76x02_txwi)); mt76x02_mac_write_txwi(dev, txwi, skb, wcid, sta, len); - return mt76x02u_set_txinfo(skb, wcid, q2ep(q->hw_idx)); + pid = mt76_tx_status_skb_add(mdev, wcid, skb); + txwi->pktid = pid; + + if ((pid && pid != MT_PACKET_ID_NO_ACK) || + q2ep(q->hw_idx) == MT_EP_OUT_HCCA) + qsel = MT_QSEL_MGMT; + else + qsel = MT_QSEL_EDCA; + + flags = FIELD_PREP(MT_TXD_INFO_QSEL, qsel) | + MT_TXD_INFO_80211; + if (!wcid || wcid->hw_key_idx == 0xff || wcid->sw_iv) + flags |= MT_TXD_INFO_WIV; + + return mt76x02u_skb_dma_info(skb, WLAN_PORT, flags); } EXPORT_SYMBOL_GPL(mt76x02u_tx_prepare_skb); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c index da299b8a1334..6db789f90269 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_usb_mcu.c @@ -129,9 +129,6 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, u8 seq = 0; u32 info; - if (!skb) - return -EINVAL; - if (test_bit(MT76_REMOVED, &dev->state)) return 0; @@ -162,12 +159,17 @@ __mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, } static int -mt76x02u_mcu_send_msg(struct mt76_dev *dev, struct sk_buff *skb, - int cmd, bool wait_resp) +mt76x02u_mcu_send_msg(struct mt76_dev *dev, int cmd, const void *data, + int len, bool wait_resp) { struct mt76_usb *usb = &dev->usb; + struct sk_buff *skb; int err; + skb = mt76x02u_mcu_msg_alloc(data, len); + if (!skb) + return -ENOMEM; + mutex_lock(&usb->mcu.mutex); err = __mt76x02u_mcu_send_msg(dev, skb, cmd, wait_resp); mutex_unlock(&usb->mcu.mutex); @@ -186,6 +188,7 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base, { const int CMD_RANDOM_WRITE = 12; const int max_vals_per_cmd = MT_INBAND_PACKET_MAX_LEN / 8; + struct mt76_usb *usb = &dev->usb; struct sk_buff *skb; int cnt, i, ret; @@ -204,7 +207,9 @@ mt76x02u_mcu_wr_rp(struct mt76_dev *dev, u32 base, skb_put_le32(skb, data[i].value); } - ret = mt76x02u_mcu_send_msg(dev, skb, 
CMD_RANDOM_WRITE, cnt == n); + mutex_lock(&usb->mcu.mutex); + ret = __mt76x02u_mcu_send_msg(dev, skb, CMD_RANDOM_WRITE, cnt == n); + mutex_unlock(&usb->mcu.mutex); if (ret) return ret; @@ -345,7 +350,6 @@ EXPORT_SYMBOL_GPL(mt76x02u_mcu_fw_send_data); void mt76x02u_init_mcu(struct mt76_dev *dev) { static const struct mt76_mcu_ops mt76x02u_mcu_ops = { - .mcu_msg_alloc = mt76x02u_mcu_msg_alloc, .mcu_send_msg = mt76x02u_mcu_send_msg, .mcu_wr_rp = mt76x02u_mcu_wr_rp, .mcu_rd_rp = mt76x02u_mcu_rd_rp, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c index ca05332f81fc..38bd466cff16 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x02_util.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x02_util.c @@ -47,6 +47,92 @@ struct ieee80211_rate mt76x02_rates[] = { }; EXPORT_SYMBOL_GPL(mt76x02_rates); +static const struct ieee80211_iface_limit mt76x02_if_limits[] = { + { + .max = 1, + .types = BIT(NL80211_IFTYPE_ADHOC) + }, { + .max = 8, + .types = BIT(NL80211_IFTYPE_STATION) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_AP) + }, +}; + +static const struct ieee80211_iface_combination mt76x02_if_comb[] = { + { + .limits = mt76x02_if_limits, + .n_limits = ARRAY_SIZE(mt76x02_if_limits), + .max_interfaces = 8, + .num_different_channels = 1, + .beacon_int_infra_match = true, + .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | + BIT(NL80211_CHAN_WIDTH_20) | + BIT(NL80211_CHAN_WIDTH_40) | + BIT(NL80211_CHAN_WIDTH_80), + } +}; + +void mt76x02_init_device(struct mt76x02_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; + + INIT_DELAYED_WORK(&dev->mac_work, mt76x02_mac_work); + + hw->queues = 4; + hw->max_rates = 1; + hw->max_report_rates = 7; + hw->max_rate_tries = 1; + hw->extra_tx_headroom = 2; + + if (mt76_is_usb(dev)) { + hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + + MT_DMA_HDR_LEN; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + } else { + mt76x02_dfs_init_detector(dev); + + wiphy->reg_notifier = mt76x02_regd_notifier; + wiphy->iface_combinations = mt76x02_if_comb; + wiphy->n_iface_combinations = ARRAY_SIZE(mt76x02_if_comb); + wiphy->interface_modes = + BIT(NL80211_IFTYPE_STATION) | + BIT(NL80211_IFTYPE_AP) | +#ifdef CONFIG_MAC80211_MESH + BIT(NL80211_IFTYPE_MESH_POINT) | +#endif + BIT(NL80211_IFTYPE_ADHOC); + + wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); + } + + hw->sta_data_size = sizeof(struct mt76x02_sta); + hw->vif_data_size = sizeof(struct mt76x02_vif); + + ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); + ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); + + dev->mt76.global_wcid.idx = 255; + dev->mt76.global_wcid.hw_key_idx = -1; + dev->slottime = 9; + + if (is_mt76x2(dev)) { + dev->mt76.sband_2g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING; + dev->mt76.sband_5g.sband.ht_cap.cap |= + IEEE80211_HT_CAP_LDPC_CODING; + dev->mt76.chainmask = 0x202; + dev->mt76.antenna_mask = 3; + } else { + dev->mt76.chainmask = 0x101; + dev->mt76.antenna_mask = 1; + } +} +EXPORT_SYMBOL_GPL(mt76x02_init_device); + void mt76x02_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) @@ -81,23 +167,17 @@ void mt76x02_configure_filter(struct ieee80211_hw *hw, } EXPORT_SYMBOL_GPL(mt76x02_configure_filter); -int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, +int mt76x02_sta_add(struct mt76_dev *mdev, struct ieee80211_vif *vif, struct ieee80211_sta 
*sta) { - struct mt76x02_dev *dev = hw->priv; + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; - int ret = 0; int idx = 0; - int i; - - mutex_lock(&dev->mt76.mutex); idx = mt76_wcid_alloc(dev->mt76.wcid_mask, ARRAY_SIZE(dev->mt76.wcid)); - if (idx < 0) { - ret = -ENOSPC; - goto out; - } + if (idx < 0) + return -ENOSPC; msta->vif = mvif; msta->wcid.sta = 1; @@ -105,41 +185,25 @@ int mt76x02_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, msta->wcid.hw_key_idx = -1; mt76x02_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); mt76x02_mac_wcid_set_drop(dev, idx, false); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76x02_txq_init(dev, sta->txq[i]); if (vif->type == NL80211_IFTYPE_AP) set_bit(MT_WCID_FLAG_CHECK_PS, &msta->wcid.flags); ewma_signal_init(&msta->rssi); - rcu_assign_pointer(dev->mt76.wcid[idx], &msta->wcid); - -out: - mutex_unlock(&dev->mt76.mutex); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(mt76x02_sta_add); -int mt76x02_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_sta *sta) +void mt76x02_sta_remove(struct mt76_dev *mdev, struct ieee80211_vif *vif, + struct ieee80211_sta *sta) { - struct mt76x02_dev *dev = hw->priv; - struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; - int idx = msta->wcid.idx; - int i; + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct mt76_wcid *wcid = (struct mt76_wcid *)sta->drv_priv; + int idx = wcid->idx; - mutex_lock(&dev->mt76.mutex); - rcu_assign_pointer(dev->mt76.wcid[idx], NULL); - for (i = 0; i < ARRAY_SIZE(sta->txq); i++) - mt76_txq_remove(&dev->mt76, sta->txq[i]); mt76x02_mac_wcid_set_drop(dev, idx, true); - mt76_wcid_free(dev->mt76.wcid_mask, idx); mt76x02_mac_wcid_setup(dev, idx, 0, NULL); - mutex_unlock(&dev->mt76.mutex); - - return 0; } EXPORT_SYMBOL_GPL(mt76x02_sta_remove); @@ -147,11 +211,15 @@ void mt76x02_vif_init(struct mt76x02_dev *dev, struct ieee80211_vif *vif, unsigned int idx) { struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; + struct mt76_txq *mtxq; mvif->idx = idx; mvif->group_wcid.idx = MT_VIF_WCID(idx); mvif->group_wcid.hw_key_idx = -1; - mt76x02_txq_init(dev, vif->txq); + mtxq = (struct mt76_txq *) vif->txq->drv_priv; + mtxq->wcid = &mvif->group_wcid; + + mt76_txq_init(&dev->mt76, vif->txq); } EXPORT_SYMBOL_GPL(mt76x02_vif_init); @@ -357,6 +425,51 @@ int mt76x02_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, } EXPORT_SYMBOL_GPL(mt76x02_conf_tx); +void mt76x02_set_tx_ackto(struct mt76x02_dev *dev) +{ + u8 ackto, sifs, slottime = dev->slottime; + + /* As defined by IEEE 802.11-2007 17.3.8.6 */ + slottime += 3 * dev->coverage_class; + mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, + MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); + + sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG, + MT_XIFS_TIME_CFG_OFDM_SIFS); + + ackto = slottime + sifs; + mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG, + MT_TX_TIMEOUT_CFG_ACKTO, ackto); +} +EXPORT_SYMBOL_GPL(mt76x02_set_tx_ackto); + +void mt76x02_set_coverage_class(struct ieee80211_hw *hw, + s16 coverage_class) +{ + struct mt76x02_dev *dev = hw->priv; + + mutex_lock(&dev->mt76.mutex); + dev->coverage_class = coverage_class; + mt76x02_set_tx_ackto(dev); + mutex_unlock(&dev->mt76.mutex); +} +EXPORT_SYMBOL_GPL(mt76x02_set_coverage_class); + +int mt76x02_set_rts_threshold(struct ieee80211_hw *hw, u32 val) +{ + struct mt76x02_dev *dev = hw->priv; + + if 
(val != ~0 && val > 0xffff) + return -EINVAL; + + mutex_lock(&dev->mt76.mutex); + mt76x02_mac_set_tx_protection(dev, val); + mutex_unlock(&dev->mt76.mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_set_rts_threshold); + void mt76x02_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) @@ -405,6 +518,64 @@ void mt76x02_remove_hdr_pad(struct sk_buff *skb, int len) } EXPORT_SYMBOL_GPL(mt76x02_remove_hdr_pad); +void mt76x02_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, + const u8 *mac) +{ + struct mt76x02_dev *dev = hw->priv; + + if (mt76_is_mmio(dev)) + tasklet_disable(&dev->pre_tbtt_tasklet); + set_bit(MT76_SCANNING, &dev->mt76.state); +} +EXPORT_SYMBOL_GPL(mt76x02_sw_scan); + +void mt76x02_sw_scan_complete(struct ieee80211_hw *hw, + struct ieee80211_vif *vif) +{ + struct mt76x02_dev *dev = hw->priv; + + clear_bit(MT76_SCANNING, &dev->mt76.state); + if (mt76_is_mmio(dev)) + tasklet_enable(&dev->pre_tbtt_tasklet); + + if (dev->cal.gain_init_done) { + /* Restore AGC gain and resume calibration after scanning. */ + dev->cal.low_gain = -1; + ieee80211_queue_delayed_work(hw, &dev->cal_work, 0); + } +} +EXPORT_SYMBOL_GPL(mt76x02_sw_scan_complete); + +int mt76x02_get_txpower(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, int *dbm) +{ + struct mt76x02_dev *dev = hw->priv; + u8 nstreams = dev->mt76.chainmask & 0xf; + + *dbm = dev->mt76.txpower_cur / 2; + + /* convert from per-chain power to combined + * output on 2x2 devices + */ + if (nstreams > 1) + *dbm += 3; + + return 0; +} +EXPORT_SYMBOL_GPL(mt76x02_get_txpower); + +void mt76x02_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, + bool ps) +{ + struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); + struct mt76x02_sta *msta = (struct mt76x02_sta *)sta->drv_priv; + int idx = msta->wcid.idx; + + mt76_stop_tx_queues(&dev->mt76, sta, true); + mt76x02_mac_wcid_set_drop(dev, idx, ps); +} +EXPORT_SYMBOL_GPL(mt76x02_sta_ps); + const u16 mt76x02_beacon_offsets[16] = { /* 1024 byte per beacon */ 0xc000, @@ -425,9 +596,8 @@ const u16 mt76x02_beacon_offsets[16] = { 0xc000, 0xc000, }; -EXPORT_SYMBOL_GPL(mt76x02_beacon_offsets); -void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev) +static void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev) { u16 val, base = MT_BEACON_BASE; u32 regs[4] = {}; @@ -441,6 +611,98 @@ void mt76x02_set_beacon_offsets(struct mt76x02_dev *dev) for (i = 0; i < 4; i++) mt76_wr(dev, MT_BCN_OFFSET(i), regs[i]); } -EXPORT_SYMBOL_GPL(mt76x02_set_beacon_offsets); + +void mt76x02_init_beacon_config(struct mt76x02_dev *dev) +{ + static const u8 null_addr[ETH_ALEN] = {}; + int i; + + mt76_wr(dev, MT_MAC_BSSID_DW0, + get_unaligned_le32(dev->mt76.macaddr)); + mt76_wr(dev, MT_MAC_BSSID_DW1, + get_unaligned_le16(dev->mt76.macaddr + 4) | + FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */ + MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT); + + /* Fire a pre-TBTT interrupt 8 ms before TBTT */ + mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, + 8 << 4); + mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, + MT_DFS_GP_INTERVAL); + mt76_wr(dev, MT_INT_TIMER_EN, 0); + + mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff); + + for (i = 0; i < 8; i++) { + mt76x02_mac_set_bssid(dev, i, null_addr); + mt76x02_mac_set_beacon(dev, i, NULL); + } + mt76x02_set_beacon_offsets(dev); +} +EXPORT_SYMBOL_GPL(mt76x02_init_beacon_config); + +void mt76x02_bss_info_changed(struct ieee80211_hw *hw, + struct ieee80211_vif *vif, + struct ieee80211_bss_conf 
*info, + u32 changed) +{ + struct mt76x02_vif *mvif = (struct mt76x02_vif *)vif->drv_priv; + struct mt76x02_dev *dev = hw->priv; + + mutex_lock(&dev->mt76.mutex); + + if (changed & BSS_CHANGED_BSSID) + mt76x02_mac_set_bssid(dev, mvif->idx, info->bssid); + + if (changed & BSS_CHANGED_BEACON_ENABLED) { + tasklet_disable(&dev->pre_tbtt_tasklet); + mt76x02_mac_set_beacon_enable(dev, mvif->idx, + info->enable_beacon); + tasklet_enable(&dev->pre_tbtt_tasklet); + } + + if (changed & BSS_CHANGED_BEACON_INT) { + mt76_rmw_field(dev, MT_BEACON_TIME_CFG, + MT_BEACON_TIME_CFG_INTVAL, + info->beacon_int << 4); + dev->beacon_int = info->beacon_int; + dev->tbtt_count = 0; + } + + if (changed & BSS_CHANGED_ERP_PREAMBLE) + mt76x02_mac_set_short_preamble(dev, info->use_short_preamble); + + if (changed & BSS_CHANGED_ERP_SLOT) { + int slottime = info->use_short_slot ? 9 : 20; + + dev->slottime = slottime; + mt76x02_set_tx_ackto(dev); + } + + mutex_unlock(&dev->mt76.mutex); +} +EXPORT_SYMBOL_GPL(mt76x02_bss_info_changed); + +void mt76x02_config_mac_addr_list(struct mt76x02_dev *dev) +{ + struct ieee80211_hw *hw = mt76_hw(dev); + struct wiphy *wiphy = hw->wiphy; + int i; + + for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { + u8 *addr = dev->macaddr_list[i].addr; + + memcpy(addr, dev->mt76.macaddr, ETH_ALEN); + + if (!i) + continue; + + addr[0] |= BIT(1); + addr[0] ^= ((i - 1) << 2); + } + wiphy->addresses = dev->macaddr_list; + wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list); +} +EXPORT_SYMBOL_GPL(mt76x02_config_mac_addr_list); MODULE_LICENSE("Dual BSD/GPL"); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile index b71bb1049170..9297b850bbba 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/Makefile @@ -3,11 +3,11 @@ obj-$(CONFIG_MT76x2E) += mt76x2e.o obj-$(CONFIG_MT76x2U) += mt76x2u.o mt76x2-common-y := \ - eeprom.o mac.o init.o phy.o debugfs.o mcu.o + eeprom.o mac.o init.o phy.o mcu.o mt76x2e-y := \ - pci.o pci_main.o pci_init.o pci_tx.o \ - pci_mac.o pci_mcu.o pci_phy.o pci_dfs.o + pci.o pci_main.o pci_init.o pci_mcu.o \ + pci_phy.o mt76x2u-y := \ usb.o usb_init.o usb_main.o usb_mac.o usb_mcu.o \ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h b/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h deleted file mode 100644 index 3cb9d1864286..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/dfs.h +++ /dev/null @@ -1,26 +0,0 @@ -/* - * Copyright (C) 2016 Lorenzo Bianconi <lorenzo.bianconi83@gmail.com> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#ifndef __DFS_H -#define __DFS_H - -void mt76x2_dfs_init_params(struct mt76x02_dev *dev); -void mt76x2_dfs_init_detector(struct mt76x02_dev *dev); -void mt76x2_dfs_adjust_agc(struct mt76x02_dev *dev); -void mt76x2_dfs_set_domain(struct mt76x02_dev *dev, - enum nl80211_dfs_regions region); - -#endif /* __DFS_H */ diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c index f39b622d03f4..6f6998561d9d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/eeprom.c @@ -22,17 +22,6 @@ #define EE_FIELD(_name, _value) [MT_EE_##_name] = (_value) | 1 static int -mt76x2_eeprom_copy(struct mt76x02_dev *dev, enum mt76x02_eeprom_field field, - void *dest, int len) -{ - if (field + len > dev->mt76.eeprom.size) - return -1; - - memcpy(dest, dev->mt76.eeprom.data + field, len); - return 0; -} - -static int mt76x2_eeprom_get_macaddr(struct mt76x02_dev *dev) { void *src = dev->mt76.eeprom.data + MT_EE_MAC_ADDR; @@ -378,7 +367,7 @@ mt76x2_get_power_info_2g(struct mt76x02_dev *dev, else delta_idx = 5; - mt76x2_eeprom_copy(dev, offset, data, sizeof(data)); + mt76x02_eeprom_copy(dev, offset, data, sizeof(data)); t->chain[chain].tssi_slope = data[0]; t->chain[chain].tssi_offset = data[1]; @@ -429,7 +418,7 @@ mt76x2_get_power_info_5g(struct mt76x02_dev *dev, else delta_idx = 4; - mt76x2_eeprom_copy(dev, offset, data, sizeof(data)); + mt76x02_eeprom_copy(dev, offset, data, sizeof(data)); t->chain[chain].tssi_slope = data[0]; t->chain[chain].tssi_offset = data[1]; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c index 3c73fdeaf30f..54a9b5fac787 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/init.c @@ -158,38 +158,6 @@ void mt76_write_mac_initvals(struct mt76x02_dev *dev) } EXPORT_SYMBOL_GPL(mt76_write_mac_initvals); -void mt76x2_init_device(struct mt76x02_dev *dev) -{ - struct ieee80211_hw *hw = mt76_hw(dev); - - hw->queues = 4; - hw->max_rates = 1; - hw->max_report_rates = 7; - hw->max_rate_tries = 1; - hw->extra_tx_headroom = 2; - if (mt76_is_usb(dev)) - hw->extra_tx_headroom += sizeof(struct mt76x02_txwi) + - MT_DMA_HDR_LEN; - - hw->sta_data_size = sizeof(struct mt76x02_sta); - hw->vif_data_size = sizeof(struct mt76x02_vif); - - ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); - ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); - - dev->mt76.sband_2g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - dev->mt76.sband_5g.sband.ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; - - dev->mt76.chainmask = 0x202; - dev->mt76.global_wcid.idx = 255; - dev->mt76.global_wcid.hw_key_idx = -1; - dev->slottime = 9; - - /* init antenna configuration */ - dev->mt76.antenna_mask = 3; -} -EXPORT_SYMBOL_GPL(mt76x2_init_device); - void mt76x2_init_txpower(struct mt76x02_dev *dev, struct ieee80211_supported_band *sband) { diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h index a31bd49ae6cb..4c8e20bce920 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mac.h @@ -26,12 +26,5 @@ struct mt76x02_vif; int mt76x2_mac_start(struct mt76x02_dev *dev); void mt76x2_mac_stop(struct mt76x02_dev *dev, bool force); void mt76x2_mac_resume(struct mt76x02_dev *dev); -void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr); - -int mt76x2_mac_set_beacon(struct mt76x02_dev 
*dev, u8 vif_idx, - struct sk_buff *skb); -void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, u8 vif_idx, bool val); - -void mt76x2_mac_work(struct work_struct *work); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c index 88bd62cfbdf9..cd3e082f486c 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mcu.c @@ -26,7 +26,6 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw, u8 bw_index, bool scan) { - struct sk_buff *skb; struct { u8 idx; u8 scan; @@ -45,21 +44,19 @@ int mt76x2_mcu_set_channel(struct mt76x02_dev *dev, u8 channel, u8 bw, }; /* first set the channel without the extension channel info */ - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true); + mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), true); usleep_range(5000, 10000); msg.ext_chan = 0xe0 + bw_index; - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_SWITCH_CHANNEL_OP, true); + return mt76_mcu_send_msg(dev, CMD_SWITCH_CHANNEL_OP, &msg, sizeof(msg), + true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_set_channel); int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, u8 channel) { - struct sk_buff *skb; struct { u8 cr_mode; u8 temp; @@ -80,15 +77,13 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, msg.cfg = cpu_to_le32(val); /* first set the channel without the extension channel info */ - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_LOAD_CR, true); + return mt76_mcu_send_msg(dev, CMD_LOAD_CR, &msg, sizeof(msg), true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_load_cr); int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain, bool force) { - struct sk_buff *skb; struct { __le32 channel; __le32 gain_val; @@ -100,15 +95,14 @@ int mt76x2_mcu_init_gain(struct mt76x02_dev *dev, u8 channel, u32 gain, if (force) msg.channel |= cpu_to_le32(BIT(31)); - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_INIT_GAIN_OP, true); + return mt76_mcu_send_msg(dev, CMD_INIT_GAIN_OP, &msg, sizeof(msg), + true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_init_gain); int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, struct mt76x2_tssi_comp *tssi_data) { - struct sk_buff *skb; struct { __le32 id; struct mt76x2_tssi_comp data; @@ -117,7 +111,7 @@ int mt76x2_mcu_tssi_comp(struct mt76x02_dev *dev, .data = *tssi_data, }; - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_CALIBRATION_OP, true); + return mt76_mcu_send_msg(dev, CMD_CALIBRATION_OP, &msg, sizeof(msg), + true); } EXPORT_SYMBOL_GPL(mt76x2_mcu_tssi_comp); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h index ab93125f46de..b259e4b50f1e 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2.h @@ -31,14 +31,8 @@ #define MT7662_ROM_PATCH "mt7662_rom_patch.bin" #define MT7662_EEPROM_SIZE 512 -#define MT7662U_FIRMWARE "mediatek/mt7662u.bin" -#define MT7662U_ROM_PATCH "mediatek/mt7662u_rom_patch.bin" - -#define MT_CALIBRATE_INTERVAL HZ - #include "../mt76x02.h" #include "mac.h" -#include "dfs.h" static inline bool is_mt7612(struct mt76x02_dev *dev) { @@ -57,15 +51,12 @@ extern const struct ieee80211_ops mt76x2_ops; struct mt76x02_dev 
*mt76x2_alloc_device(struct device *pdev); int mt76x2_register_device(struct mt76x02_dev *dev); -void mt76x2_init_debugfs(struct mt76x02_dev *dev); -void mt76x2_init_device(struct mt76x02_dev *dev); void mt76x2_phy_power_on(struct mt76x02_dev *dev); int mt76x2_init_hardware(struct mt76x02_dev *dev); void mt76x2_stop_hardware(struct mt76x02_dev *dev); int mt76x2_eeprom_init(struct mt76x02_dev *dev); int mt76x2_apply_calibration_data(struct mt76x02_dev *dev, int channel); -void mt76x2_set_tx_ackto(struct mt76x02_dev *dev); void mt76x2_phy_set_antenna(struct mt76x02_dev *dev); int mt76x2_phy_start(struct mt76x02_dev *dev); @@ -82,24 +73,17 @@ int mt76x2_mcu_load_cr(struct mt76x02_dev *dev, u8 type, u8 temp_level, void mt76x2_cleanup(struct mt76x02_dev *dev); -void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val); - -void mt76x2_pre_tbtt_tasklet(unsigned long arg); - -void mt76x2_sta_ps(struct mt76_dev *dev, struct ieee80211_sta *sta, bool ps); - -void mt76x2_update_channel(struct mt76_dev *mdev); - void mt76x2_reset_wlan(struct mt76x02_dev *dev, bool enable); void mt76x2_init_txpower(struct mt76x02_dev *dev, struct ieee80211_supported_band *sband); void mt76_write_mac_initvals(struct mt76x02_dev *dev); -void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait); +void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev); void mt76x2_phy_set_txpower_regs(struct mt76x02_dev *dev, enum nl80211_band band); void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, enum nl80211_band band, u8 bw); void mt76x2_apply_gain_adj(struct mt76x02_dev *dev); +void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev); #endif diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h index 6e932b5010ef..0b0075411b34 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/mt76x2u.h @@ -43,11 +43,8 @@ int mt76x2u_mac_stop(struct mt76x02_dev *dev); int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef); void mt76x2u_phy_calibrate(struct work_struct *work); -void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev); void mt76x2u_mcu_complete_urb(struct urb *urb); -int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap, - bool ext, int rssi, u32 false_cca); int mt76x2u_mcu_init(struct mt76x02_dev *dev); int mt76x2u_mcu_fw_init(struct mt76x02_dev *dev); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c index fd125722d1fb..7f4ea2d00f42 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_init.c @@ -79,7 +79,6 @@ mt76x2_fixup_xtal(struct mt76x02_dev *dev) static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) { - static const u8 null_addr[ETH_ALEN] = {}; const u8 *macaddr = dev->mt76.macaddr; u32 val; int i, k; @@ -123,27 +122,18 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(macaddr)); mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(macaddr + 4)); - mt76_wr(dev, MT_MAC_BSSID_DW0, get_unaligned_le32(macaddr)); - mt76_wr(dev, MT_MAC_BSSID_DW1, get_unaligned_le16(macaddr + 4) | - FIELD_PREP(MT_MAC_BSSID_DW1_MBSS_MODE, 3) | /* 8 beacons */ - MT_MAC_BSSID_DW1_MBSS_LOCAL_BIT); - - /* Fire a pre-TBTT interrupt 8 ms before TBTT */ - mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_PRE_TBTT, - 8 << 4); - 
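The pre-TBTT and MBSS programming deleted from this reset path now lives in the common mt76x02_init_beacon_config() shown earlier in the patch. One detail worth spelling out: these timer registers count in 1/16-TU steps (64 us, per the comment in mt76x02_resync_beacon_timer()), so both the beacon interval and the "8 ms before TBTT" lead are shifted left by 4 before being written. A standalone sketch of the unit conversion, with invented helper names:

/* Beacon timer units: 1 TU = 1024 us, register step = TU/16 = 64 us.
 * tu_to_timer() applies the same scaling as the "beacon_int << 4"
 * and "8 << 4" writes in the driver.
 */
#include <stdio.h>

#define TIMER_STEP_US 64u

static unsigned int tu_to_timer(unsigned int tu)
{
    return tu << 4;
}

static unsigned int timer_to_us(unsigned int val)
{
    return val * TIMER_STEP_US;
}

int main(void)
{
    /* A typical 100 TU beacon interval, and the 8 TU pre-TBTT lead
     * (8 << 4 = 128 steps = 8192 us, the "8 ms" in the comment).
     */
    printf("interval: %u steps (%u us)\n",
           tu_to_timer(100), timer_to_us(tu_to_timer(100)));
    printf("pre-TBTT: %u steps (%u us)\n",
           tu_to_timer(8), timer_to_us(tu_to_timer(8)));
    return 0;
}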
mt76_rmw_field(dev, MT_INT_TIMER_CFG, MT_INT_TIMER_CFG_GP_TIMER, - MT_DFS_GP_INTERVAL); - mt76_wr(dev, MT_INT_TIMER_EN, 0); - - mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xffff); + mt76x02_init_beacon_config(dev); if (!hard) return 0; for (i = 0; i < 256 / 32; i++) mt76_wr(dev, MT_WCID_DROP_BASE + i * 4, 0); - for (i = 0; i < 256; i++) + for (i = 0; i < 256; i++) { mt76x02_mac_wcid_setup(dev, i, 0, NULL); + mt76_wr(dev, MT_WCID_TX_RATE(i), 0); + mt76_wr(dev, MT_WCID_TX_RATE(i) + 4, 0); + } for (i = 0; i < MT_MAX_VIFS; i++) mt76x02_mac_wcid_setup(dev, MT_VIF_WCID(i), i, NULL); @@ -152,11 +142,6 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) for (k = 0; k < 4; k++) mt76x02_mac_shared_key_setup(dev, i, k, NULL); - for (i = 0; i < 8; i++) { - mt76x2_mac_set_bssid(dev, i, null_addr); - mt76x2_mac_set_beacon(dev, i, NULL); - } - for (i = 0; i < 16; i++) mt76_rr(dev, MT_TX_STAT_FIFO); @@ -168,9 +153,7 @@ static int mt76x2_mac_reset(struct mt76x02_dev *dev, bool hard) MT_CH_TIME_CFG_EIFS_AS_BUSY | FIELD_PREP(MT_CH_TIME_CFG_CH_TIMER_CLR, 1)); - mt76x02_set_beacon_offsets(dev); - - mt76x2_set_tx_ackto(dev); + mt76x02_set_tx_ackto(dev); return 0; } @@ -277,30 +260,10 @@ mt76x2_power_on(struct mt76x02_dev *dev) mt76x2_power_on_rf(dev, 1); } -void mt76x2_set_tx_ackto(struct mt76x02_dev *dev) -{ - u8 ackto, sifs, slottime = dev->slottime; - - /* As defined by IEEE 802.11-2007 17.3.8.6 */ - slottime += 3 * dev->coverage_class; - mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, - MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); - - sifs = mt76_get_field(dev, MT_XIFS_TIME_CFG, - MT_XIFS_TIME_CFG_OFDM_SIFS); - - ackto = slottime + sifs; - mt76_rmw_field(dev, MT_TX_TIMEOUT_CFG, - MT_TX_TIMEOUT_CFG_ACKTO, ackto); -} - int mt76x2_init_hardware(struct mt76x02_dev *dev) { int ret; - tasklet_init(&dev->pre_tbtt_tasklet, mt76x2_pre_tbtt_tasklet, - (unsigned long) dev); - mt76x02_dma_disable(dev); mt76x2_reset_wlan(dev, true); mt76x2_power_on(dev); @@ -337,7 +300,7 @@ void mt76x2_stop_hardware(struct mt76x02_dev *dev) { cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); - mt76x02_mcu_set_radio_state(dev, false, true); + mt76x02_mcu_set_radio_state(dev, false); mt76x2_mac_stop(dev, false); } @@ -354,12 +317,14 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev) { static const struct mt76_driver_ops drv_ops = { .txwi_size = sizeof(struct mt76x02_txwi), - .update_survey = mt76x2_update_channel, + .update_survey = mt76x02_update_channel, .tx_prepare_skb = mt76x02_tx_prepare_skb, .tx_complete_skb = mt76x02_tx_complete_skb, .rx_skb = mt76x02_queue_rx_skb, .rx_poll_complete = mt76x02_rx_poll_complete, - .sta_ps = mt76x2_sta_ps, + .sta_ps = mt76x02_sta_ps, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, }; struct mt76x02_dev *dev; struct mt76_dev *mdev; @@ -375,43 +340,6 @@ struct mt76x02_dev *mt76x2_alloc_device(struct device *pdev) return dev; } -static void mt76x2_regd_notifier(struct wiphy *wiphy, - struct regulatory_request *request) -{ - struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); - struct mt76x02_dev *dev = hw->priv; - - mt76x2_dfs_set_domain(dev, request->dfs_region); -} - -static const struct ieee80211_iface_limit if_limits[] = { - { - .max = 1, - .types = BIT(NL80211_IFTYPE_ADHOC) - }, { - .max = 8, - .types = BIT(NL80211_IFTYPE_STATION) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_AP) - }, -}; - -static const struct ieee80211_iface_combination if_comb[] = { - { - .limits = if_limits, - .n_limits = 
ARRAY_SIZE(if_limits), - .max_interfaces = 8, - .num_different_channels = 1, - .beacon_int_infra_match = true, - .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | - BIT(NL80211_CHAN_WIDTH_20) | - BIT(NL80211_CHAN_WIDTH_40) | - BIT(NL80211_CHAN_WIDTH_80), - } -}; - static void mt76x2_led_set_config(struct mt76_dev *mt76, u8 delay_on, u8 delay_off) { @@ -462,49 +390,17 @@ static void mt76x2_led_set_brightness(struct led_classdev *led_cdev, int mt76x2_register_device(struct mt76x02_dev *dev) { - struct ieee80211_hw *hw = mt76_hw(dev); - struct wiphy *wiphy = hw->wiphy; - int i, ret; + int ret; INIT_DELAYED_WORK(&dev->cal_work, mt76x2_phy_calibrate); - INIT_DELAYED_WORK(&dev->mac_work, mt76x2_mac_work); - mt76x2_init_device(dev); + mt76x02_init_device(dev); ret = mt76x2_init_hardware(dev); if (ret) return ret; - for (i = 0; i < ARRAY_SIZE(dev->macaddr_list); i++) { - u8 *addr = dev->macaddr_list[i].addr; - - memcpy(addr, dev->mt76.macaddr, ETH_ALEN); - - if (!i) - continue; - - addr[0] |= BIT(1); - addr[0] ^= ((i - 1) << 2); - } - wiphy->addresses = dev->macaddr_list; - wiphy->n_addresses = ARRAY_SIZE(dev->macaddr_list); - - wiphy->iface_combinations = if_comb; - wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); - - wiphy->reg_notifier = mt76x2_regd_notifier; - - wiphy->interface_modes = - BIT(NL80211_IFTYPE_STATION) | - BIT(NL80211_IFTYPE_AP) | -#ifdef CONFIG_MAC80211_MESH - BIT(NL80211_IFTYPE_MESH_POINT) | -#endif - BIT(NL80211_IFTYPE_ADHOC); - - wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_VHT_IBSS); - - mt76x2_dfs_init_detector(dev); + mt76x02_config_mac_addr_list(dev); /* init led callbacks */ if (IS_ENABLED(CONFIG_MT76_LEDS)) { @@ -517,7 +413,7 @@ int mt76x2_register_device(struct mt76x02_dev *dev) if (ret) goto fail; - mt76x2_init_debugfs(dev); + mt76x02_init_debugfs(dev); mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband); mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c deleted file mode 100644 index 4b331ed14bb2..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mac.c +++ /dev/null @@ -1,203 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include <linux/delay.h> -#include "mt76x2.h" -#include "mcu.h" -#include "eeprom.h" - -void mt76x2_mac_set_bssid(struct mt76x02_dev *dev, u8 idx, const u8 *addr) -{ - idx &= 7; - mt76_wr(dev, MT_MAC_APC_BSSID_L(idx), get_unaligned_le32(addr)); - mt76_rmw_field(dev, MT_MAC_APC_BSSID_H(idx), MT_MAC_APC_BSSID_H_ADDR, - get_unaligned_le16(addr + 4)); -} - -static int -mt76_write_beacon(struct mt76x02_dev *dev, int offset, struct sk_buff *skb) -{ - int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; - struct mt76x02_txwi txwi; - - if (WARN_ON_ONCE(beacon_len < skb->len + sizeof(struct mt76x02_txwi))) - return -ENOSPC; - - mt76x02_mac_write_txwi(dev, &txwi, skb, NULL, NULL, skb->len); - - mt76_wr_copy(dev, offset, &txwi, sizeof(txwi)); - offset += sizeof(txwi); - - mt76_wr_copy(dev, offset, skb->data, skb->len); - return 0; -} - -static int -__mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 bcn_idx, struct sk_buff *skb) -{ - int beacon_len = mt76x02_beacon_offsets[1] - mt76x02_beacon_offsets[0]; - int beacon_addr = mt76x02_beacon_offsets[bcn_idx]; - int ret = 0; - int i; - - /* Prevent corrupt transmissions during update */ - mt76_set(dev, MT_BCN_BYPASS_MASK, BIT(bcn_idx)); - - if (skb) { - ret = mt76_write_beacon(dev, beacon_addr, skb); - if (!ret) - dev->beacon_data_mask |= BIT(bcn_idx); - } else { - dev->beacon_data_mask &= ~BIT(bcn_idx); - for (i = 0; i < beacon_len; i += 4) - mt76_wr(dev, beacon_addr + i, 0); - } - - mt76_wr(dev, MT_BCN_BYPASS_MASK, 0xff00 | ~dev->beacon_data_mask); - - return ret; -} - -int mt76x2_mac_set_beacon(struct mt76x02_dev *dev, u8 vif_idx, - struct sk_buff *skb) -{ - bool force_update = false; - int bcn_idx = 0; - int i; - - for (i = 0; i < ARRAY_SIZE(dev->beacons); i++) { - if (vif_idx == i) { - force_update = !!dev->beacons[i] ^ !!skb; - - if (dev->beacons[i]) - dev_kfree_skb(dev->beacons[i]); - - dev->beacons[i] = skb; - __mt76x2_mac_set_beacon(dev, bcn_idx, skb); - } else if (force_update && dev->beacons[i]) { - __mt76x2_mac_set_beacon(dev, bcn_idx, dev->beacons[i]); - } - - bcn_idx += !!dev->beacons[i]; - } - - for (i = bcn_idx; i < ARRAY_SIZE(dev->beacons); i++) { - if (!(dev->beacon_data_mask & BIT(i))) - break; - - __mt76x2_mac_set_beacon(dev, i, NULL); - } - - mt76_rmw_field(dev, MT_MAC_BSSID_DW1, MT_MAC_BSSID_DW1_MBEACON_N, - bcn_idx - 1); - return 0; -} - -void mt76x2_mac_set_beacon_enable(struct mt76x02_dev *dev, - u8 vif_idx, bool val) -{ - u8 old_mask = dev->beacon_mask; - bool en; - u32 reg; - - if (val) { - dev->beacon_mask |= BIT(vif_idx); - } else { - dev->beacon_mask &= ~BIT(vif_idx); - mt76x2_mac_set_beacon(dev, vif_idx, NULL); - } - - if (!!old_mask == !!dev->beacon_mask) - return; - - en = dev->beacon_mask; - - mt76_rmw_field(dev, MT_INT_TIMER_EN, MT_INT_TIMER_EN_PRE_TBTT_EN, en); - reg = MT_BEACON_TIME_CFG_BEACON_TX | - MT_BEACON_TIME_CFG_TBTT_EN | - MT_BEACON_TIME_CFG_TIMER_EN; - mt76_rmw(dev, MT_BEACON_TIME_CFG, reg, reg * en); - - if (en) - mt76x02_irq_enable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); - else - mt76x02_irq_disable(dev, MT_INT_PRE_TBTT | MT_INT_TBTT); -} - -void mt76x2_update_channel(struct mt76_dev *mdev) -{ - struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); - struct mt76_channel_state *state; - u32 active, busy; - - state = mt76_channel_state(&dev->mt76, dev->mt76.chandef.chan); - - busy = mt76_rr(dev, MT_CH_BUSY); - active = busy + mt76_rr(dev, MT_CH_IDLE); - - spin_lock_bh(&dev->mt76.cc_lock); - state->cc_busy += busy; - state->cc_active += active; - 
spin_unlock_bh(&dev->mt76.cc_lock); -} - -void mt76x2_mac_work(struct work_struct *work) -{ - struct mt76x02_dev *dev = container_of(work, struct mt76x02_dev, - mac_work.work); - int i, idx; - - mt76x2_update_channel(&dev->mt76); - for (i = 0, idx = 0; i < 16; i++) { - u32 val = mt76_rr(dev, MT_TX_AGG_CNT(i)); - - dev->aggr_stats[idx++] += val & 0xffff; - dev->aggr_stats[idx++] += val >> 16; - } - - ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, - MT_CALIBRATE_INTERVAL); -} - -void mt76x2_mac_set_tx_protection(struct mt76x02_dev *dev, u32 val) -{ - u32 data = 0; - - if (val != ~0) - data = FIELD_PREP(MT_PROT_CFG_CTRL, 1) | - MT_PROT_CFG_RTS_THRESH; - - mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, val); - - mt76_rmw(dev, MT_CCK_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_OFDM_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_MM20_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_MM40_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_GF20_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_GF40_PROT_CFG, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG6, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG7, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); - mt76_rmw(dev, MT_TX_PROT_CFG8, - MT_PROT_CFG_CTRL | MT_PROT_CFG_RTS_THRESH, data); -} diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c index 3f001bd6806c..b54a32397486 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_main.c @@ -74,7 +74,7 @@ mt76x2_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) mt76_rr(dev, MT_CH_IDLE); mt76_rr(dev, MT_CH_BUSY); - mt76x2_dfs_init_params(dev); + mt76x02_dfs_init_params(dev); mt76x2_mac_resume(dev); tasklet_enable(&dev->dfs_pd.dfs_tasklet); @@ -128,103 +128,12 @@ mt76x2_config(struct ieee80211_hw *hw, u32 changed) } static void -mt76x2_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed) -{ - struct mt76x02_dev *dev = hw->priv; - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; - - mutex_lock(&dev->mt76.mutex); - - if (changed & BSS_CHANGED_BSSID) - mt76x2_mac_set_bssid(dev, mvif->idx, info->bssid); - - if (changed & BSS_CHANGED_BEACON_INT) { - mt76_rmw_field(dev, MT_BEACON_TIME_CFG, - MT_BEACON_TIME_CFG_INTVAL, - info->beacon_int << 4); - dev->beacon_int = info->beacon_int; - dev->tbtt_count = 0; - } - - if (changed & BSS_CHANGED_BEACON_ENABLED) { - tasklet_disable(&dev->pre_tbtt_tasklet); - mt76x2_mac_set_beacon_enable(dev, mvif->idx, - info->enable_beacon); - tasklet_enable(&dev->pre_tbtt_tasklet); - } - - if (changed & BSS_CHANGED_ERP_SLOT) { - int slottime = info->use_short_slot ? 
9 : 20; - - dev->slottime = slottime; - mt76x2_set_tx_ackto(dev); - } - - mutex_unlock(&dev->mt76.mutex); -} - -void -mt76x2_sta_ps(struct mt76_dev *mdev, struct ieee80211_sta *sta, bool ps) -{ - struct mt76x02_sta *msta = (struct mt76x02_sta *) sta->drv_priv; - struct mt76x02_dev *dev = container_of(mdev, struct mt76x02_dev, mt76); - int idx = msta->wcid.idx; - - mt76_stop_tx_queues(&dev->mt76, sta, true); - mt76x02_mac_wcid_set_drop(dev, idx, ps); -} - -static void -mt76x2_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac) -{ - struct mt76x02_dev *dev = hw->priv; - - tasklet_disable(&dev->pre_tbtt_tasklet); - set_bit(MT76_SCANNING, &dev->mt76.state); -} - -static void -mt76x2_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct mt76x02_dev *dev = hw->priv; - - clear_bit(MT76_SCANNING, &dev->mt76.state); - tasklet_enable(&dev->pre_tbtt_tasklet); -} - -static void mt76x2_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { } static int -mt76x2_get_txpower(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int *dbm) -{ - struct mt76x02_dev *dev = hw->priv; - - *dbm = dev->mt76.txpower_cur / 2; - - /* convert from per-chain power to combined output on 2x2 devices */ - *dbm += 3; - - return 0; -} - -static void mt76x2_set_coverage_class(struct ieee80211_hw *hw, - s16 coverage_class) -{ - struct mt76x02_dev *dev = hw->priv; - - mutex_lock(&dev->mt76.mutex); - dev->coverage_class = coverage_class; - mt76x2_set_tx_ackto(dev); - mutex_unlock(&dev->mt76.mutex); -} - -static int mt76x2_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { return 0; @@ -264,21 +173,6 @@ static int mt76x2_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, return 0; } -static int -mt76x2_set_rts_threshold(struct ieee80211_hw *hw, u32 val) -{ - struct mt76x02_dev *dev = hw->priv; - - if (val != ~0 && val > 0xffff) - return -EINVAL; - - mutex_lock(&dev->mt76.mutex); - mt76x2_mac_set_tx_protection(dev, val); - mutex_unlock(&dev->mt76.mutex); - - return 0; -} - const struct ieee80211_ops mt76x2_ops = { .tx = mt76x02_tx, .start = mt76x2_start, @@ -287,24 +181,23 @@ const struct ieee80211_ops mt76x2_ops = { .remove_interface = mt76x02_remove_interface, .config = mt76x2_config, .configure_filter = mt76x02_configure_filter, - .bss_info_changed = mt76x2_bss_info_changed, - .sta_add = mt76x02_sta_add, - .sta_remove = mt76x02_sta_remove, + .bss_info_changed = mt76x02_bss_info_changed, + .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x2_sw_scan, - .sw_scan_complete = mt76x2_sw_scan_complete, + .sw_scan_start = mt76x02_sw_scan, + .sw_scan_complete = mt76x02_sw_scan_complete, .flush = mt76x2_flush, .ampdu_action = mt76x02_ampdu_action, - .get_txpower = mt76x2_get_txpower, + .get_txpower = mt76x02_get_txpower, .wake_tx_queue = mt76_wake_tx_queue, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, .release_buffered_frames = mt76_release_buffered_frames, - .set_coverage_class = mt76x2_set_coverage_class, + .set_coverage_class = mt76x02_set_coverage_class, .get_survey = mt76_get_survey, .set_tim = mt76x2_set_tim, .set_antenna = mt76x2_set_antenna, .get_antenna = mt76x2_get_antenna, - .set_rts_threshold = mt76x2_set_rts_threshold, + .set_rts_threshold = mt76x02_set_rts_threshold, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c index d8fa9ba56437..03e24ae7f66c 100644 --- 
a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_mcu.c @@ -168,7 +168,6 @@ error: int mt76x2_mcu_init(struct mt76x02_dev *dev) { static const struct mt76_mcu_ops mt76x2_mcu_ops = { - .mcu_msg_alloc = mt76x02_mcu_msg_alloc, .mcu_send_msg = mt76x02_mcu_msg_send, }; int ret; @@ -183,6 +182,6 @@ int mt76x2_mcu_init(struct mt76x02_dev *dev) if (ret) return ret; - mt76x02_mcu_function_select(dev, Q_SELECT, 1, true); + mt76x02_mcu_function_select(dev, Q_SELECT, 1); return 0; } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c index 5bda44540225..da7cd40f56ff 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_phy.c @@ -38,7 +38,7 @@ mt76x2_phy_tssi_init_cal(struct mt76x02_dev *dev) if (mt76x02_ext_pa_enabled(dev, chan->band)) flag |= BIT(8); - mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag); dev->cal.tssi_cal_done = true; return true; } @@ -62,13 +62,13 @@ mt76x2_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped) mt76x2_mac_stop(dev, false); if (is_5ghz) - mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0); - mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, true); - mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, true); - mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, true); - mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, true); - mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz); + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz); + mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0); if (!mac_stopped) mt76x2_mac_resume(dev); @@ -124,96 +124,6 @@ void mt76x2_phy_set_antenna(struct mt76x02_dev *dev) mt76_wr(dev, MT_BBP(AGC, 0), val); } -static void -mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) -{ - u32 val; - u8 gain_val[2]; - - gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; - gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; - - if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) - val = 0x1e42 << 16; - else - val = 0x1836 << 16; - - val |= 0xf8; - - mt76_wr(dev, MT_BBP(AGC, 8), - val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0])); - mt76_wr(dev, MT_BBP(AGC, 9), - val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1])); - - if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) - mt76x2_dfs_adjust_agc(dev); -} - -static void -mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) -{ - u8 *gain = dev->cal.agc_gain_init; - u8 low_gain_delta, gain_delta; - bool gain_change; - int low_gain; - u32 val; - - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); - - low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + - (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); - - gain_change = (dev->cal.low_gain & 2) ^ (low_gain & 2); - dev->cal.low_gain = low_gain; - - if (!gain_change) { - if (mt76x02_phy_adjust_vga_gain(dev)) - mt76x2_phy_set_gain_val(dev); - return; - } - - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) { - mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211); - val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf; - if (low_gain == 2) - val |= 0x3; - else - val |= 0x5; - mt76_wr(dev, MT_BBP(AGC, 26), val); - } else { - mt76_wr(dev, MT_BBP(RXO, 
14), 0x00560423); - } - - if (mt76x2_has_ext_lna(dev)) - low_gain_delta = 10; - else - low_gain_delta = 14; - - if (low_gain == 2) { - mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); - mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); - mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); - gain_delta = low_gain_delta; - dev->cal.agc_gain_adjust = 0; - } else { - mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); - if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) - mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); - else - mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); - mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); - gain_delta = 0; - dev->cal.agc_gain_adjust = low_gain_delta; - } - - dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; - dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; - mt76x2_phy_set_gain_val(dev); - - /* clear false CCA counters */ - mt76_rr(dev, MT_RX_STAT_1); -} - int mt76x2_phy_set_channel(struct mt76x02_dev *dev, struct cfg80211_chan_def *chandef) { @@ -313,14 +223,14 @@ int mt76x2_phy_set_channel(struct mt76x02_dev *dev, u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); if (val != 0xff) - mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0); } - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel); /* Rx LPF calibration */ if (!dev->cal.init_cal_done) - mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, true); + mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0); dev->cal.init_cal_done = true; @@ -384,7 +294,7 @@ void mt76x2_phy_calibrate(struct work_struct *work) dev = container_of(work, struct mt76x02_dev, cal_work.work); mt76x2_phy_channel_calibrate(dev, false); - mt76x2_phy_tssi_compensate(dev, true); + mt76x2_phy_tssi_compensate(dev); mt76x2_phy_temp_compensate(dev); mt76x2_phy_update_channel_gain(dev); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, @@ -395,7 +305,7 @@ int mt76x2_phy_start(struct mt76x02_dev *dev) { int ret; - ret = mt76x02_mcu_set_radio_state(dev, true, true); + ret = mt76x02_mcu_set_radio_state(dev, true); if (ret) return ret; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c b/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c deleted file mode 100644 index 3a2ec86d3e88..000000000000 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/pci_tx.c +++ /dev/null @@ -1,142 +0,0 @@ -/* - * Copyright (C) 2016 Felix Fietkau <nbd@nbd.name> - * - * Permission to use, copy, modify, and/or distribute this software for any - * purpose with or without fee is hereby granted, provided that the above - * copyright notice and this permission notice appear in all copies. - * - * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES - * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR - * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES - * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN - * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF - * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
- */ - -#include "mt76x2.h" - -struct beacon_bc_data { - struct mt76x02_dev *dev; - struct sk_buff_head q; - struct sk_buff *tail[8]; -}; - -static void -mt76x2_update_beacon_iter(void *priv, u8 *mac, struct ieee80211_vif *vif) -{ - struct mt76x02_dev *dev = (struct mt76x02_dev *) priv; - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; - struct sk_buff *skb = NULL; - - if (!(dev->beacon_mask & BIT(mvif->idx))) - return; - - skb = ieee80211_beacon_get(mt76_hw(dev), vif); - if (!skb) - return; - - mt76x2_mac_set_beacon(dev, mvif->idx, skb); -} - -static void -mt76x2_add_buffered_bc(void *priv, u8 *mac, struct ieee80211_vif *vif) -{ - struct beacon_bc_data *data = priv; - struct mt76x02_dev *dev = data->dev; - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; - struct ieee80211_tx_info *info; - struct sk_buff *skb; - - if (!(dev->beacon_mask & BIT(mvif->idx))) - return; - - skb = ieee80211_get_buffered_bc(mt76_hw(dev), vif); - if (!skb) - return; - - info = IEEE80211_SKB_CB(skb); - info->control.vif = vif; - info->flags |= IEEE80211_TX_CTL_ASSIGN_SEQ; - mt76_skb_set_moredata(skb, true); - __skb_queue_tail(&data->q, skb); - data->tail[mvif->idx] = skb; -} - -static void -mt76x2_resync_beacon_timer(struct mt76x02_dev *dev) -{ - u32 timer_val = dev->beacon_int << 4; - - dev->tbtt_count++; - - /* - * Beacon timer drifts by 1us every tick, the timer is configured - * in 1/16 TU (64us) units. - */ - if (dev->tbtt_count < 62) - return; - - if (dev->tbtt_count >= 64) { - dev->tbtt_count = 0; - return; - } - - /* - * The updated beacon interval takes effect after two TBTT, because - * at this point the original interval has already been loaded into - * the next TBTT_TIMER value - */ - if (dev->tbtt_count == 62) - timer_val -= 1; - - mt76_rmw_field(dev, MT_BEACON_TIME_CFG, - MT_BEACON_TIME_CFG_INTVAL, timer_val); -} - -void mt76x2_pre_tbtt_tasklet(unsigned long arg) -{ - struct mt76x02_dev *dev = (struct mt76x02_dev *) arg; - struct mt76_queue *q = &dev->mt76.q_tx[MT_TXQ_PSD]; - struct beacon_bc_data data = {}; - struct sk_buff *skb; - int i, nframes; - - mt76x2_resync_beacon_timer(dev); - - data.dev = dev; - __skb_queue_head_init(&data.q); - - ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), - IEEE80211_IFACE_ITER_RESUME_ALL, - mt76x2_update_beacon_iter, dev); - - do { - nframes = skb_queue_len(&data.q); - ieee80211_iterate_active_interfaces_atomic(mt76_hw(dev), - IEEE80211_IFACE_ITER_RESUME_ALL, - mt76x2_add_buffered_bc, &data); - } while (nframes != skb_queue_len(&data.q)); - - if (!nframes) - return; - - for (i = 0; i < ARRAY_SIZE(data.tail); i++) { - if (!data.tail[i]) - continue; - - mt76_skb_set_moredata(data.tail[i], false); - } - - spin_lock_bh(&q->lock); - while ((skb = __skb_dequeue(&data.q)) != NULL) { - struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); - struct ieee80211_vif *vif = info->control.vif; - struct mt76x02_vif *mvif = (struct mt76x02_vif *) vif->drv_priv; - - mt76_dma_tx_queue_skb(&dev->mt76, q, skb, &mvif->group_wcid, - NULL); - } - spin_unlock_bh(&q->lock); -} - diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c index e9fff5b7f125..c9634a774705 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/phy.c @@ -210,7 +210,7 @@ void mt76x2_configure_tx_delay(struct mt76x02_dev *dev, } EXPORT_SYMBOL_GPL(mt76x2_configure_tx_delay); -void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait) +void 
mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; struct mt76x2_tx_power_info txp; @@ -245,8 +245,99 @@ void mt76x2_phy_tssi_compensate(struct mt76x02_dev *dev, bool wait) return; usleep_range(10000, 20000); - mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value, wait); + mt76x02_mcu_calibrate(dev, MCU_CAL_DPD, chan->hw_value); dev->cal.dpd_cal_done = true; } } EXPORT_SYMBOL_GPL(mt76x2_phy_tssi_compensate); + +static void +mt76x2_phy_set_gain_val(struct mt76x02_dev *dev) +{ + u32 val; + u8 gain_val[2]; + + gain_val[0] = dev->cal.agc_gain_cur[0] - dev->cal.agc_gain_adjust; + gain_val[1] = dev->cal.agc_gain_cur[1] - dev->cal.agc_gain_adjust; + + if (dev->mt76.chandef.width >= NL80211_CHAN_WIDTH_40) + val = 0x1e42 << 16; + else + val = 0x1836 << 16; + + val |= 0xf8; + + mt76_wr(dev, MT_BBP(AGC, 8), + val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[0])); + mt76_wr(dev, MT_BBP(AGC, 9), + val | FIELD_PREP(MT_BBP_AGC_GAIN, gain_val[1])); + + if (dev->mt76.chandef.chan->flags & IEEE80211_CHAN_RADAR) + mt76x02_phy_dfs_adjust_agc(dev); +} + +void mt76x2_phy_update_channel_gain(struct mt76x02_dev *dev) +{ + u8 *gain = dev->cal.agc_gain_init; + u8 low_gain_delta, gain_delta; + bool gain_change; + int low_gain; + u32 val; + + dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); + + low_gain = (dev->cal.avg_rssi_all > mt76x02_get_rssi_gain_thresh(dev)) + + (dev->cal.avg_rssi_all > mt76x02_get_low_rssi_gain_thresh(dev)); + + gain_change = dev->cal.low_gain < 0 || + (dev->cal.low_gain & 2) ^ (low_gain & 2); + dev->cal.low_gain = low_gain; + + if (!gain_change) { + if (mt76x02_phy_adjust_vga_gain(dev)) + mt76x2_phy_set_gain_val(dev); + return; + } + + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) { + mt76_wr(dev, MT_BBP(RXO, 14), 0x00560211); + val = mt76_rr(dev, MT_BBP(AGC, 26)) & ~0xf; + if (low_gain == 2) + val |= 0x3; + else + val |= 0x5; + mt76_wr(dev, MT_BBP(AGC, 26), val); + } else { + mt76_wr(dev, MT_BBP(RXO, 14), 0x00560423); + } + + if (mt76x2_has_ext_lna(dev)) + low_gain_delta = 10; + else + low_gain_delta = 14; + + if (low_gain == 2) { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a990); + mt76_wr(dev, MT_BBP(AGC, 35), 0x08080808); + mt76_wr(dev, MT_BBP(AGC, 37), 0x08080808); + gain_delta = low_gain_delta; + dev->cal.agc_gain_adjust = 0; + } else { + mt76_wr(dev, MT_BBP(RXO, 18), 0xf000a991); + if (dev->mt76.chandef.width == NL80211_CHAN_WIDTH_80) + mt76_wr(dev, MT_BBP(AGC, 35), 0x10101014); + else + mt76_wr(dev, MT_BBP(AGC, 35), 0x11111116); + mt76_wr(dev, MT_BBP(AGC, 37), 0x2121262C); + gain_delta = 0; + dev->cal.agc_gain_adjust = low_gain_delta; + } + + dev->cal.agc_gain_cur[0] = gain[0] - gain_delta; + dev->cal.agc_gain_cur[1] = gain[1] - gain_delta; + mt76x2_phy_set_gain_val(dev); + + /* clear false CCA counters */ + mt76_rr(dev, MT_RX_STAT_1); +} +EXPORT_SYMBOL_GPL(mt76x2_phy_update_channel_gain); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c index 57baf8d1c830..4d1788eb3812 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb.c @@ -131,8 +131,8 @@ err: } MODULE_DEVICE_TABLE(usb, mt76x2u_device_table); -MODULE_FIRMWARE(MT7662U_FIRMWARE); -MODULE_FIRMWARE(MT7662U_ROM_PATCH); +MODULE_FIRMWARE(MT7662_FIRMWARE); +MODULE_FIRMWARE(MT7662_ROM_PATCH); static struct usb_driver mt76x2u_driver = { .name = KBUILD_MODNAME, diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c 
b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c index 13cce2937573..0be3784f44fb 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_init.c @@ -141,6 +141,8 @@ struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev) .tx_complete_skb = mt76x02u_tx_complete_skb, .tx_status_data = mt76x02_tx_status_data, .rx_skb = mt76x02_queue_rx_skb, + .sta_add = mt76x02_sta_add, + .sta_remove = mt76x02_sta_remove, }; struct mt76x02_dev *dev; struct mt76_dev *mdev; @@ -156,21 +158,9 @@ struct mt76x02_dev *mt76x2u_alloc_device(struct device *pdev) return dev; } -static void mt76x2u_init_beacon_offsets(struct mt76x02_dev *dev) -{ - mt76_wr(dev, MT_BCN_OFFSET(0), 0x18100800); - mt76_wr(dev, MT_BCN_OFFSET(1), 0x38302820); - mt76_wr(dev, MT_BCN_OFFSET(2), 0x58504840); - mt76_wr(dev, MT_BCN_OFFSET(3), 0x78706860); -} - int mt76x2u_init_hardware(struct mt76x02_dev *dev) { - const struct mt76_wcid_addr addr = { - .macaddr = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}, - .ba_mask = 0, - }; - int i, err; + int i, k, err; mt76x2_reset_wlan(dev, true); mt76x2u_power_on(dev); @@ -191,9 +181,6 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev) if (!mt76x02_wait_for_mac(&dev->mt76)) return -ETIMEDOUT; - mt76_wr(dev, MT_HEADER_TRANS_CTRL_REG, 0); - mt76_wr(dev, MT_TSO_CTRL, 0); - mt76x2u_init_dma(dev); err = mt76x2u_mcu_init(dev); @@ -207,21 +194,18 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev) mt76x02_mac_setaddr(dev, dev->mt76.eeprom.data + MT_EE_MAC_ADDR); dev->mt76.rxfilter = mt76_rr(dev, MT_RX_FILTR_CFG); - mt76x2u_init_beacon_offsets(dev); - if (!mt76x02_wait_for_txrx_idle(&dev->mt76)) return -ETIMEDOUT; /* reset wcid table */ - for (i = 0; i < 254; i++) - mt76_wr_copy(dev, MT_WCID_ADDR(i), &addr, - sizeof(struct mt76_wcid_addr)); + for (i = 0; i < 256; i++) + mt76x02_mac_wcid_setup(dev, i, 0, NULL); /* reset shared key table and pairwise key table */ - for (i = 0; i < 4; i++) - mt76_wr(dev, MT_SKEY_MODE_BASE_0 + 4 * i, 0); - for (i = 0; i < 256; i++) - mt76_wr(dev, MT_WCID_ATTR(i), 1); + for (i = 0; i < 16; i++) { + for (k = 0; k < 4; k++) + mt76x02_mac_shared_key_setup(dev, i, k, NULL); + } mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | @@ -245,11 +229,10 @@ int mt76x2u_init_hardware(struct mt76x02_dev *dev) int mt76x2u_register_device(struct mt76x02_dev *dev) { struct ieee80211_hw *hw = mt76_hw(dev); - struct wiphy *wiphy = hw->wiphy; int err; INIT_DELAYED_WORK(&dev->cal_work, mt76x2u_phy_calibrate); - mt76x2_init_device(dev); + mt76x02_init_device(dev); err = mt76x2u_init_eeprom(dev); if (err < 0) @@ -267,8 +250,6 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) if (err < 0) goto fail; - wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); - err = mt76_register_device(&dev->mt76, true, mt76x02_rates, ARRAY_SIZE(mt76x02_rates)); if (err) @@ -282,7 +263,7 @@ int mt76x2u_register_device(struct mt76x02_dev *dev) set_bit(MT76_STATE_INITIALIZED, &dev->mt76.state); - mt76x2_init_debugfs(dev); + mt76x02_init_debugfs(dev); mt76x2_init_txpower(dev, &dev->mt76.sband_2g.sband); mt76x2_init_txpower(dev, &dev->mt76.sband_5g.sband); @@ -297,12 +278,13 @@ void mt76x2u_stop_hw(struct mt76x02_dev *dev) { mt76u_stop_stat_wk(&dev->mt76); cancel_delayed_work_sync(&dev->cal_work); + cancel_delayed_work_sync(&dev->mac_work); mt76x2u_mac_stop(dev); } void mt76x2u_cleanup(struct mt76x02_dev *dev) { - mt76x02_mcu_set_radio_state(dev, false, false); + mt76x02_mcu_set_radio_state(dev, false); mt76x2u_stop_hw(dev); 
mt76u_queues_deinit(&dev->mt76); mt76u_mcu_deinit(&dev->mt76); diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c index 1971a1b00038..2b48cc51a30d 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_main.c @@ -27,6 +27,8 @@ static int mt76x2u_start(struct ieee80211_hw *hw) if (ret) goto out; + ieee80211_queue_delayed_work(mt76_hw(dev), &dev->mac_work, + MT_CALIBRATE_INTERVAL); set_bit(MT76_STATE_RUNNING, &dev->mt76.state); out: @@ -48,11 +50,12 @@ static int mt76x2u_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt76x02_dev *dev = hw->priv; + unsigned int idx = 8; if (!ether_addr_equal(dev->mt76.macaddr, vif->addr)) mt76x02_mac_setaddr(dev, vif->addr); - mt76x02_vif_init(dev, vif, 0); + mt76x02_vif_init(dev, vif, idx); return 0; } @@ -81,29 +84,6 @@ mt76x2u_set_channel(struct mt76x02_dev *dev, return err; } -static void -mt76x2u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - struct ieee80211_bss_conf *info, u32 changed) -{ - struct mt76x02_dev *dev = hw->priv; - - mutex_lock(&dev->mt76.mutex); - - if (changed & BSS_CHANGED_ASSOC) { - mt76x2u_phy_channel_calibrate(dev); - mt76x2_apply_gain_adj(dev); - } - - if (changed & BSS_CHANGED_BSSID) { - mt76_wr(dev, MT_MAC_BSSID_DW0, - get_unaligned_le32(info->bssid)); - mt76_wr(dev, MT_MAC_BSSID_DW1, - get_unaligned_le16(info->bssid + 4)); - } - - mutex_unlock(&dev->mt76.mutex); -} - static int mt76x2u_config(struct ieee80211_hw *hw, u32 changed) { @@ -141,39 +121,22 @@ mt76x2u_config(struct ieee80211_hw *hw, u32 changed) return err; } -static void -mt76x2u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, - const u8 *mac) -{ - struct mt76x02_dev *dev = hw->priv; - - set_bit(MT76_SCANNING, &dev->mt76.state); -} - -static void -mt76x2u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) -{ - struct mt76x02_dev *dev = hw->priv; - - clear_bit(MT76_SCANNING, &dev->mt76.state); -} - const struct ieee80211_ops mt76x2u_ops = { .tx = mt76x02_tx, .start = mt76x2u_start, .stop = mt76x2u_stop, .add_interface = mt76x2u_add_interface, .remove_interface = mt76x02_remove_interface, - .sta_add = mt76x02_sta_add, - .sta_remove = mt76x02_sta_remove, + .sta_state = mt76_sta_state, .set_key = mt76x02_set_key, .ampdu_action = mt76x02_ampdu_action, .config = mt76x2u_config, .wake_tx_queue = mt76_wake_tx_queue, - .bss_info_changed = mt76x2u_bss_info_changed, + .bss_info_changed = mt76x02_bss_info_changed, .configure_filter = mt76x02_configure_filter, .conf_tx = mt76x02_conf_tx, - .sw_scan_start = mt76x2u_sw_scan, - .sw_scan_complete = mt76x2u_sw_scan_complete, + .sw_scan_start = mt76x02_sw_scan, + .sw_scan_complete = mt76x02_sw_scan_complete, .sta_rate_tbl_update = mt76x02_sta_rate_tbl_update, + .get_txpower = mt76x02_get_txpower, }; diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c index 3f1e558e5e6d..45a95ee3a415 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_mcu.c @@ -29,30 +29,6 @@ #define MT76U_MCU_DLM_OFFSET 0x110000 #define MT76U_MCU_ROM_PATCH_OFFSET 0x90000 -int mt76x2u_mcu_set_dynamic_vga(struct mt76x02_dev *dev, u8 channel, bool ap, - bool ext, int rssi, u32 false_cca) -{ - struct { - __le32 channel; - __le32 rssi_val; - __le32 false_cca_val; - } __packed __aligned(4) msg = { - .rssi_val = cpu_to_le32(rssi), - 
.false_cca_val = cpu_to_le32(false_cca), - }; - struct sk_buff *skb; - u32 val = channel; - - if (ap) - val |= BIT(31); - if (ext) - val |= BIT(30); - msg.channel = cpu_to_le32(val); - - skb = mt76_mcu_msg_alloc(dev, &msg, sizeof(msg)); - return mt76_mcu_send_msg(dev, skb, CMD_DYNC_VGA_OP, true); -} - static void mt76x2u_mcu_load_ivb(struct mt76x02_dev *dev) { mt76u_vendor_request(&dev->mt76, MT_VEND_DEV_MODE, @@ -117,7 +93,7 @@ static int mt76x2u_mcu_load_rom_patch(struct mt76x02_dev *dev) return 0; } - err = request_firmware(&fw, MT7662U_ROM_PATCH, dev->mt76.dev); + err = request_firmware(&fw, MT7662_ROM_PATCH, dev->mt76.dev); if (err < 0) return err; @@ -183,7 +159,7 @@ static int mt76x2u_mcu_load_firmware(struct mt76x02_dev *dev) int err, len, ilm_len, dlm_len; const struct firmware *fw; - err = request_firmware(&fw, MT7662U_FIRMWARE, dev->mt76.dev); + err = request_firmware(&fw, MT7662_FIRMWARE, dev->mt76.dev); if (err < 0) return err; @@ -282,9 +258,9 @@ int mt76x2u_mcu_init(struct mt76x02_dev *dev) { int err; - err = mt76x02_mcu_function_select(dev, Q_SELECT, 1, false); + err = mt76x02_mcu_function_select(dev, Q_SELECT, 1); if (err < 0) return err; - return mt76x02_mcu_set_radio_state(dev, true, false); + return mt76x02_mcu_set_radio_state(dev, true); } diff --git a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c index ca96ba60510e..11d414d86c68 100644 --- a/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c +++ b/drivers/net/wireless/mediatek/mt76/mt76x2/usb_phy.c @@ -18,63 +18,35 @@ #include "eeprom.h" #include "../mt76x02_phy.h" -void mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev) +static void +mt76x2u_phy_channel_calibrate(struct mt76x02_dev *dev, bool mac_stopped) { struct ieee80211_channel *chan = dev->mt76.chandef.chan; bool is_5ghz = chan->band == NL80211_BAND_5GHZ; + if (dev->cal.channel_cal_done) + return; + if (mt76x2_channel_silent(dev)) return; - mt76x2u_mac_stop(dev); + if (!mac_stopped) + mt76x2u_mac_stop(dev); if (is_5ghz) - mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0, false); - - mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz, false); - mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0, false); - - mt76x2u_mac_resume(dev); -} + mt76x02_mcu_calibrate(dev, MCU_CAL_LC, 0); -static void -mt76x2u_phy_update_channel_gain(struct mt76x02_dev *dev) -{ - u8 channel = dev->mt76.chandef.chan->hw_value; - int freq, freq1; - u32 false_cca; - - freq = dev->mt76.chandef.chan->center_freq; - freq1 = dev->mt76.chandef.center_freq1; + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_LOFT, is_5ghz); + mt76x02_mcu_calibrate(dev, MCU_CAL_TXIQ, is_5ghz); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXIQC_FI, is_5ghz); + mt76x02_mcu_calibrate(dev, MCU_CAL_TEMP_SENSOR, 0); + mt76x02_mcu_calibrate(dev, MCU_CAL_TX_SHAPING, 0); - switch (dev->mt76.chandef.width) { - case NL80211_CHAN_WIDTH_80: { - int ch_group_index; + if (!mac_stopped) + mt76x2u_mac_resume(dev); + mt76x2_apply_gain_adj(dev); - ch_group_index = (freq - freq1 + 30) / 20; - if (WARN_ON(ch_group_index < 0 || ch_group_index > 3)) - ch_group_index = 0; - channel += 6 - ch_group_index * 4; - break; - } - case NL80211_CHAN_WIDTH_40: - if (freq1 > freq) - channel += 2; - else - channel -= 2; - break; - default: - break; - } - - dev->cal.avg_rssi_all = mt76x02_phy_get_min_avg_rssi(dev); - false_cca = FIELD_GET(MT_RX_STAT_1_CCA_ERRORS, - mt76_rr(dev, MT_RX_STAT_1)); 
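/* editor's note: this USB-only dynamic VGA path is dropped by the patch; gain handling moves to the shared mt76x2_phy_update_channel_gain() */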
- - mt76x2u_mcu_set_dynamic_vga(dev, channel, false, false, - dev->cal.avg_rssi_all, false_cca); + dev->cal.channel_cal_done = true; } void mt76x2u_phy_calibrate(struct work_struct *work) @@ -82,8 +54,9 @@ void mt76x2u_phy_calibrate(struct work_struct *work) struct mt76x02_dev *dev; dev = container_of(work, struct mt76x02_dev, cal_work.work); - mt76x2_phy_tssi_compensate(dev, false); - mt76x2u_phy_update_channel_gain(dev); + mt76x2u_phy_channel_calibrate(dev, false); + mt76x2_phy_tssi_compensate(dev); + mt76x2_phy_update_channel_gain(dev); ieee80211_queue_delayed_work(mt76_hw(dev), &dev->cal_work, MT_CALIBRATE_INTERVAL); @@ -180,14 +153,14 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, u8 val = mt76x02_eeprom_get(dev, MT_EE_BT_RCAL_RESULT); if (val != 0xff) - mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_R, 0); } - mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RXDCOC, channel); /* Rx LPF calibration */ if (!dev->cal.init_cal_done) - mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_RC, 0); dev->cal.init_cal_done = true; mt76_wr(dev, MT_BBP(AGC, 61), 0xff64a4e2); @@ -202,6 +175,9 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, if (scan) return 0; + mt76x2u_phy_channel_calibrate(dev, true); + mt76x02_init_agc_gain(dev); + if (mt76x2_tssi_enabled(dev)) { /* init default values for temp compensation */ mt76_rmw_field(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, @@ -219,7 +195,7 @@ int mt76x2u_phy_set_channel(struct mt76x02_dev *dev, flag |= BIT(0); if (mt76x02_ext_pa_enabled(dev, chan->band)) flag |= BIT(8); - mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag, false); + mt76x02_mcu_calibrate(dev, MCU_CAL_TSSI, flag); dev->cal.tssi_cal_done = true; } } diff --git a/drivers/net/wireless/mediatek/mt76/tx.c b/drivers/net/wireless/mediatek/mt76/tx.c index aa426b838ffa..7b711058807d 100644 --- a/drivers/net/wireless/mediatek/mt76/tx.c +++ b/drivers/net/wireless/mediatek/mt76/tx.c @@ -104,6 +104,157 @@ mt76_check_agg_ssn(struct mt76_txq *mtxq, struct sk_buff *skb) } void +mt76_tx_status_lock(struct mt76_dev *dev, struct sk_buff_head *list) + __acquires(&dev->status_list.lock) +{ + __skb_queue_head_init(list); + spin_lock_bh(&dev->status_list.lock); + __acquire(&dev->status_list.lock); +} +EXPORT_SYMBOL_GPL(mt76_tx_status_lock); + +void +mt76_tx_status_unlock(struct mt76_dev *dev, struct sk_buff_head *list) + __releases(&dev->status_list.unlock) +{ + struct sk_buff *skb; + + spin_unlock_bh(&dev->status_list.lock); + __release(&dev->status_list.unlock); + + while ((skb = __skb_dequeue(list)) != NULL) + ieee80211_tx_status(dev->hw, skb); +} +EXPORT_SYMBOL_GPL(mt76_tx_status_unlock); + +static void +__mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, u8 flags, + struct sk_buff_head *list) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); + u8 done = MT_TX_CB_DMA_DONE | MT_TX_CB_TXS_DONE; + + flags |= cb->flags; + cb->flags = flags; + + if ((flags & done) != done) + return; + + __skb_unlink(skb, &dev->status_list); + + /* Tx status can be unreliable. 
If it fails, mark the frame as ACKed */ + if (flags & MT_TX_CB_TXS_FAILED) { + ieee80211_tx_info_clear_status(info); + info->status.rates[0].idx = -1; + info->flags |= IEEE80211_TX_STAT_ACK; + } + + __skb_queue_tail(list, skb); +} + +void +mt76_tx_status_skb_done(struct mt76_dev *dev, struct sk_buff *skb, + struct sk_buff_head *list) +{ + __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_DONE, list); +} +EXPORT_SYMBOL_GPL(mt76_tx_status_skb_done); + +int +mt76_tx_status_skb_add(struct mt76_dev *dev, struct mt76_wcid *wcid, + struct sk_buff *skb) +{ + struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); + struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); + int pid; + + if (!wcid) + return 0; + + if (info->flags & IEEE80211_TX_CTL_NO_ACK) + return MT_PACKET_ID_NO_ACK; + + if (!(info->flags & (IEEE80211_TX_CTL_REQ_TX_STATUS | + IEEE80211_TX_CTL_RATE_CTRL_PROBE))) + return 0; + + spin_lock_bh(&dev->status_list.lock); + + memset(cb, 0, sizeof(*cb)); + wcid->packet_id = (wcid->packet_id + 1) & MT_PACKET_ID_MASK; + if (!wcid->packet_id || wcid->packet_id == MT_PACKET_ID_NO_ACK) + wcid->packet_id = 1; + + pid = wcid->packet_id; + cb->wcid = wcid->idx; + cb->pktid = pid; + cb->jiffies = jiffies; + + __skb_queue_tail(&dev->status_list, skb); + spin_unlock_bh(&dev->status_list.lock); + + return pid; +} +EXPORT_SYMBOL_GPL(mt76_tx_status_skb_add); + +struct sk_buff * +mt76_tx_status_skb_get(struct mt76_dev *dev, struct mt76_wcid *wcid, int pktid, + struct sk_buff_head *list) +{ + struct sk_buff *skb, *tmp; + + if (pktid == MT_PACKET_ID_NO_ACK) + return NULL; + + skb_queue_walk_safe(&dev->status_list, skb, tmp) { + struct mt76_tx_cb *cb = mt76_tx_skb_cb(skb); + + if (wcid && cb->wcid != wcid->idx) + continue; + + if (cb->pktid == pktid) + return skb; + + if (!pktid && + !time_after(jiffies, cb->jiffies + MT_TX_STATUS_SKB_TIMEOUT)) + continue; + + __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_TXS_FAILED | + MT_TX_CB_TXS_DONE, list); + } + + return NULL; +} +EXPORT_SYMBOL_GPL(mt76_tx_status_skb_get); + +void +mt76_tx_status_check(struct mt76_dev *dev, struct mt76_wcid *wcid, bool flush) +{ + struct sk_buff_head list; + + mt76_tx_status_lock(dev, &list); + mt76_tx_status_skb_get(dev, wcid, flush ? 
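/* pktid -1 never matches a queued frame, so a flush completes everything as failed; pktid 0 only reaps entries older than MT_TX_STATUS_SKB_TIMEOUT */ 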
-1 : 0, &list); + mt76_tx_status_unlock(dev, &list); +} +EXPORT_SYMBOL_GPL(mt76_tx_status_check); + +void mt76_tx_complete_skb(struct mt76_dev *dev, struct sk_buff *skb) +{ + struct sk_buff_head list; + + if (!skb->prev) { + ieee80211_free_txskb(dev->hw, skb); + return; + } + + mt76_tx_status_lock(dev, &list); + __mt76_tx_status_skb_done(dev, skb, MT_TX_CB_DMA_DONE, &list); + mt76_tx_status_unlock(dev, &list); +} +EXPORT_SYMBOL_GPL(mt76_tx_complete_skb); + +void mt76_tx(struct mt76_dev *dev, struct ieee80211_sta *sta, struct mt76_wcid *wcid, struct sk_buff *skb) { @@ -444,7 +595,7 @@ void mt76_txq_remove(struct mt76_dev *dev, struct ieee80211_txq *txq) spin_lock_bh(&hwq->lock); if (!list_empty(&mtxq->list)) - list_del(&mtxq->list); + list_del_init(&mtxq->list); spin_unlock_bh(&hwq->lock); while ((skb = skb_dequeue(&mtxq->retry_q)) != NULL) diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c index 5f0faf07c346..b061263453d4 100644 --- a/drivers/net/wireless/mediatek/mt76/usb.c +++ b/drivers/net/wireless/mediatek/mt76/usb.c @@ -100,7 +100,7 @@ static u32 __mt76u_rr(struct mt76_dev *dev, u32 addr) return data; } -u32 mt76u_rr(struct mt76_dev *dev, u32 addr) +static u32 mt76u_rr(struct mt76_dev *dev, u32 addr) { u32 ret; @@ -110,7 +110,6 @@ u32 mt76u_rr(struct mt76_dev *dev, u32 addr) return ret; } -EXPORT_SYMBOL_GPL(mt76u_rr); /* should be called with usb_ctrl_mtx locked */ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) @@ -136,13 +135,12 @@ static void __mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) trace_usb_reg_wr(dev, addr, val); } -void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) +static void mt76u_wr(struct mt76_dev *dev, u32 addr, u32 val) { mutex_lock(&dev->usb.usb_ctrl_mtx); __mt76u_wr(dev, addr, val); mutex_unlock(&dev->usb.usb_ctrl_mtx); } -EXPORT_SYMBOL_GPL(mt76u_wr); static u32 mt76u_rmw(struct mt76_dev *dev, u32 addr, u32 mask, u32 val) @@ -356,6 +354,7 @@ int mt76u_submit_buf(struct mt76_dev *dev, int dir, int index, usb_fill_bulk_urb(buf->urb, udev, pipe, NULL, buf->len, complete_fn, context); + trace_submit_urb(dev, buf->urb); return usb_submit_urb(buf->urb, gfp); } @@ -442,6 +441,8 @@ static void mt76u_complete_rx(struct urb *urb) struct mt76_queue *q = &dev->q_rx[MT_RXQ_MAIN]; unsigned long flags; + trace_rx_urb(dev, urb); + switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: @@ -699,6 +700,7 @@ mt76u_tx_queue_skb(struct mt76_dev *dev, struct mt76_queue *q, if (q->queued == q->ndesc) return -ENOSPC; + skb->prev = skb->next = NULL; err = dev->drv->tx_prepare_skb(dev, NULL, skb, q, wcid, sta, NULL); if (err < 0) return err; @@ -728,6 +730,8 @@ static void mt76u_tx_kick(struct mt76_dev *dev, struct mt76_queue *q) while (q->first != q->tail) { buf = &q->entry[q->first].ubuf; + + trace_submit_urb(dev, buf->urb); err = usb_submit_urb(buf->urb, GFP_ATOMIC); if (err < 0) { if (err == -ENODEV) diff --git a/drivers/net/wireless/mediatek/mt76/usb_trace.h b/drivers/net/wireless/mediatek/mt76/usb_trace.h index 52db7012304a..b56c32343eb1 100644 --- a/drivers/net/wireless/mediatek/mt76/usb_trace.h +++ b/drivers/net/wireless/mediatek/mt76/usb_trace.h @@ -26,12 +26,12 @@ #define MAXNAME 32 #define DEV_ENTRY __array(char, wiphy_name, 32) #define DEV_ASSIGN strlcpy(__entry->wiphy_name, wiphy_name(dev->hw->wiphy), MAXNAME) -#define DEV_PR_FMT "%s" +#define DEV_PR_FMT "%s " #define DEV_PR_ARG __entry->wiphy_name #define REG_ENTRY __field(u32, reg) __field(u32, val) #define REG_ASSIGN __entry->reg = reg; __entry->val = 
val -#define REG_PR_FMT " %04x=%08x" +#define REG_PR_FMT "reg:0x%04x=0x%08x" #define REG_PR_ARG __entry->reg, __entry->val DECLARE_EVENT_CLASS(dev_reg_evt, @@ -61,6 +61,31 @@ DEFINE_EVENT(dev_reg_evt, usb_reg_wr, TP_ARGS(dev, reg, val) ); +DECLARE_EVENT_CLASS(urb_transfer, + TP_PROTO(struct mt76_dev *dev, struct urb *u), + TP_ARGS(dev, u), + TP_STRUCT__entry( + DEV_ENTRY __field(unsigned, pipe) __field(u32, len) + ), + TP_fast_assign( + DEV_ASSIGN; + __entry->pipe = u->pipe; + __entry->len = u->transfer_buffer_length; + ), + TP_printk(DEV_PR_FMT "p:%08x len:%u", + DEV_PR_ARG, __entry->pipe, __entry->len) +); + +DEFINE_EVENT(urb_transfer, submit_urb, + TP_PROTO(struct mt76_dev *dev, struct urb *u), + TP_ARGS(dev, u) +); + +DEFINE_EVENT(urb_transfer, rx_urb, + TP_PROTO(struct mt76_dev *dev, struct urb *u), + TP_ARGS(dev, u) +); + #endif #undef TRACE_INCLUDE_PATH diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig index b8c12a5f16b4..6cf5202c3666 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig +++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig @@ -1,11 +1,11 @@ config QTNFMAC tristate - depends on QTNFMAC_PEARL_PCIE - default m if QTNFMAC_PEARL_PCIE=m - default y if QTNFMAC_PEARL_PCIE=y + depends on QTNFMAC_PCIE + default m if QTNFMAC_PCIE=m + default y if QTNFMAC_PCIE=y -config QTNFMAC_PEARL_PCIE - tristate "Quantenna QSR10g PCIe support" +config QTNFMAC_PCIE + tristate "Quantenna QSR1000/QSR2000/QSR10g PCIe support" default n depends on PCI && CFG80211 select QTNFMAC @@ -13,7 +13,8 @@ config QTNFMAC_PEARL_PCIE select CRC32 help This option adds support for wireless adapters based on Quantenna - 802.11ac QSR10g (aka Pearl) FullMAC chipset running over PCIe. + 802.11ac QSR10g (aka Pearl) and QSR1000/QSR2000 (aka Topaz) + FullMAC chipsets running over PCIe. If you choose to build it as a module, two modules will be built: - qtnfmac.ko and qtnfmac_pearl_pcie.ko. + qtnfmac.ko and qtnfmac_pcie.ko. 
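As the renamed Kconfig entry above implies, one symbol now drives both chip families. A minimal sketch, assuming a typical modular kernel build (this fragment is illustrative, not part of the patch), of the resulting configuration:

    CONFIG_QTNFMAC=m        # pulled in automatically: "default m if QTNFMAC_PCIE=m"
    CONFIG_QTNFMAC_PCIE=m   # replaces the former CONFIG_QTNFMAC_PEARL_PCIE

With these set, the build produces the two modules named in the help text, qtnfmac.ko and qtnfmac_pcie.ko.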
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Makefile b/drivers/net/wireless/quantenna/qtnfmac/Makefile index 17cd7adb4109..40dffbd2ea47 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/Makefile +++ b/drivers/net/wireless/quantenna/qtnfmac/Makefile @@ -19,11 +19,12 @@ qtnfmac-objs += \ # -obj-$(CONFIG_QTNFMAC_PEARL_PCIE) += qtnfmac_pearl_pcie.o +obj-$(CONFIG_QTNFMAC_PCIE) += qtnfmac_pcie.o -qtnfmac_pearl_pcie-objs += \ +qtnfmac_pcie-objs += \ shm_ipc.o \ pcie/pcie.o \ - pcie/pearl_pcie.o + pcie/pearl_pcie.o \ + pcie/topaz_pcie.o -qtnfmac_pearl_pcie-$(CONFIG_DEBUG_FS) += debug.o +qtnfmac_pcie-$(CONFIG_DEBUG_FS) += debug.o diff --git a/drivers/net/wireless/quantenna/qtnfmac/commands.c b/drivers/net/wireless/quantenna/qtnfmac/commands.c index bfdc1ad30c13..659e7649fe22 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/commands.c +++ b/drivers/net/wireless/quantenna/qtnfmac/commands.c @@ -84,7 +84,7 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, size_t *var_resp_size) { struct qlink_cmd *cmd; - const struct qlink_resp *resp; + struct qlink_resp *resp = NULL; struct sk_buff *resp_skb = NULL; u16 cmd_id; u8 mac_id; @@ -113,7 +113,12 @@ static int qtnf_cmd_send_with_reply(struct qtnf_bus *bus, if (ret) goto out; - resp = (const struct qlink_resp *)resp_skb->data; + if (WARN_ON(!resp_skb || !resp_skb->data)) { + ret = -EFAULT; + goto out; + } + + resp = (struct qlink_resp *)resp_skb->data; ret = qtnf_cmd_check_reply_header(resp, cmd_id, mac_id, vif_id, const_resp_size); if (ret) @@ -686,7 +691,7 @@ int qtnf_cmd_get_sta_info(struct qtnf_vif *vif, const u8 *sta_mac, struct sk_buff *cmd_skb, *resp_skb = NULL; struct qlink_cmd_get_sta_info *cmd; const struct qlink_resp_get_sta_info *resp; - size_t var_resp_len; + size_t var_resp_len = 0; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(vif->mac->macid, vif->vifid, @@ -1650,7 +1655,7 @@ int qtnf_cmd_get_mac_info(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb, *resp_skb = NULL; const struct qlink_resp_get_mac_info *resp; - size_t var_data_len; + size_t var_data_len = 0; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, @@ -1680,8 +1685,8 @@ int qtnf_cmd_get_hw_info(struct qtnf_bus *bus) { struct sk_buff *cmd_skb, *resp_skb = NULL; const struct qlink_resp_get_hw_info *resp; + size_t info_len = 0; int ret = 0; - size_t info_len; cmd_skb = qtnf_cmd_alloc_new_cmdskb(QLINK_MACID_RSVD, QLINK_VIFID_RSVD, QLINK_CMD_GET_HW_INFO, @@ -1709,9 +1714,9 @@ int qtnf_cmd_band_info_get(struct qtnf_wmac *mac, struct ieee80211_supported_band *band) { struct sk_buff *cmd_skb, *resp_skb = NULL; - size_t info_len; struct qlink_cmd_band_info_get *cmd; struct qlink_resp_band_info_get *resp; + size_t info_len = 0; int ret = 0; u8 qband; @@ -1764,8 +1769,8 @@ out: int qtnf_cmd_send_get_phy_params(struct qtnf_wmac *mac) { struct sk_buff *cmd_skb, *resp_skb = NULL; - size_t response_size; struct qlink_resp_phy_params *resp; + size_t response_size = 0; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, 0, @@ -2431,7 +2436,7 @@ int qtnf_cmd_get_chan_stats(struct qtnf_wmac *mac, u16 channel, struct sk_buff *cmd_skb, *resp_skb = NULL; struct qlink_cmd_get_chan_stats *cmd; struct qlink_resp_get_chan_stats *resp; - size_t var_data_len; + size_t var_data_len = 0; int ret = 0; cmd_skb = qtnf_cmd_alloc_new_cmdskb(mac->macid, QLINK_VIFID_RSVD, diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c index 16795dbe475b..c3a32effa6f0 100644 --- 
a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0+ /* Copyright (c) 2018 Quantenna Communications, Inc. All rights reserved. */ +#include <linux/module.h> #include <linux/printk.h> #include <linux/pci.h> #include <linux/spinlock.h> @@ -15,14 +16,37 @@ #include "shm_ipc.h" #include "core.h" #include "debug.h" - -#undef pr_fmt -#define pr_fmt(fmt) "qtnf_pcie: %s: " fmt, __func__ +#include "util.h" +#include "qtn_hw_ids.h" #define QTN_SYSCTL_BAR 0 #define QTN_SHMEM_BAR 2 #define QTN_DMA_BAR 3 +#define QTN_PCIE_MAX_FW_BUFSZ (1 * 1024 * 1024) + +static bool use_msi = true; +module_param(use_msi, bool, 0644); +MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); + +static unsigned int tx_bd_size_param; +module_param(tx_bd_size_param, uint, 0644); +MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size"); + +static unsigned int rx_bd_size_param = 256; +module_param(rx_bd_size_param, uint, 0644); +MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size"); + +static u8 flashboot = 1; +module_param(flashboot, byte, 0644); +MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS"); + +static unsigned int fw_blksize_param = QTN_PCIE_MAX_FW_BUFSZ; +module_param(fw_blksize_param, uint, 0644); +MODULE_PARM_DESC(fw_blksize_param, "firmware loading block size in bytes"); + +#define DRV_NAME "qtnfmac_pcie" + int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb) { struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); @@ -58,7 +82,7 @@ int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv) return 0; } -void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus) +static void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus) { struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); struct pci_dev *pdev = priv->pdev; @@ -72,7 +96,7 @@ static int qtnf_dbg_mps_show(struct seq_file *s, void *data) struct qtnf_bus *bus = dev_get_drvdata(s->private); struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); - seq_printf(s, "%d\n", priv->mps); + seq_printf(s, "%d\n", pcie_get_mps(priv->pdev)); return 0; } @@ -104,8 +128,7 @@ static int qtnf_dbg_shm_stats(struct seq_file *s, void *data) return 0; } -void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success, - const char *drv_name) +void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success) { struct qtnf_pcie_bus_priv *priv = get_bus_priv(bus); struct pci_dev *pdev = priv->pdev; @@ -122,7 +145,7 @@ void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success, } if (boot_success) { - qtnf_debugfs_init(bus, drv_name); + qtnf_debugfs_init(bus, DRV_NAME); qtnf_debugfs_add_entry(bus, "mps", qtnf_dbg_mps_show); qtnf_debugfs_add_entry(bus, "msi_enabled", qtnf_dbg_msi_show); qtnf_debugfs_add_entry(bus, "shm_stats", qtnf_dbg_shm_stats); @@ -133,9 +156,8 @@ void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success, put_device(&pdev->dev); } -static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) +static void qtnf_tune_pcie_mps(struct pci_dev *pdev) { - struct pci_dev *pdev = priv->pdev; struct pci_dev *parent; int mps_p, mps_o, mps_m, mps; int ret; @@ -163,12 +185,10 @@ static void qtnf_tune_pcie_mps(struct qtnf_pcie_bus_priv *priv) if (ret) { pr_err("failed to set mps to %d, keep using current %d\n", mps, mps_o); - priv->mps = mps_o; return; } pr_debug("set mps to %d (was %d, max %d)\n", mps, mps_o, mps_m); - priv->mps = mps; } static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv 
*priv, bool use_msi) @@ -194,20 +214,20 @@ static void qtnf_pcie_init_irq(struct qtnf_pcie_bus_priv *priv, bool use_msi) } } -static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index) +static void __iomem *qtnf_map_bar(struct pci_dev *pdev, u8 index) { void __iomem *vaddr; dma_addr_t busaddr; size_t len; int ret; - ret = pcim_iomap_regions(priv->pdev, 1 << index, "qtnfmac_pcie"); + ret = pcim_iomap_regions(pdev, 1 << index, "qtnfmac_pcie"); if (ret) return IOMEM_ERR_PTR(ret); - busaddr = pci_resource_start(priv->pdev, index); - len = pci_resource_len(priv->pdev, index); - vaddr = pcim_iomap_table(priv->pdev)[index]; + busaddr = pci_resource_start(pdev, index); + len = pci_resource_len(pdev, index); + vaddr = pcim_iomap_table(pdev)[index]; if (!vaddr) return IOMEM_ERR_PTR(-ENOMEM); @@ -217,31 +237,6 @@ static void __iomem *qtnf_map_bar(struct qtnf_pcie_bus_priv *priv, u8 index) return vaddr; } -static int qtnf_pcie_init_memory(struct qtnf_pcie_bus_priv *priv) -{ - int ret = -ENOMEM; - - priv->sysctl_bar = qtnf_map_bar(priv, QTN_SYSCTL_BAR); - if (IS_ERR(priv->sysctl_bar)) { - pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); - return ret; - } - - priv->dmareg_bar = qtnf_map_bar(priv, QTN_DMA_BAR); - if (IS_ERR(priv->dmareg_bar)) { - pr_err("failed to map BAR%u\n", QTN_DMA_BAR); - return ret; - } - - priv->epmem_bar = qtnf_map_bar(priv, QTN_SHMEM_BAR); - if (IS_ERR(priv->epmem_bar)) { - pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); - return ret; - } - - return 0; -} - static void qtnf_pcie_control_rx_callback(void *arg, const u8 __iomem *buf, size_t len) { @@ -282,27 +277,83 @@ void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv, ipc_int, &rx_callback); } -int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size, - const struct qtnf_bus_ops *bus_ops, u64 dma_mask, - bool use_msi) +static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct qtnf_pcie_bus_priv *pcie_priv; struct qtnf_bus *bus; + void __iomem *sysctl_bar; + void __iomem *epmem_bar; + void __iomem *dmareg_bar; + unsigned int chipid; int ret; - bus = devm_kzalloc(&pdev->dev, - sizeof(*bus) + priv_size, GFP_KERNEL); + if (!pci_is_pcie(pdev)) { + pr_err("device %s is not PCI Express\n", pci_name(pdev)); + return -EIO; + } + + qtnf_tune_pcie_mps(pdev); + + ret = pcim_enable_device(pdev); + if (ret) { + pr_err("failed to init PCI device %x\n", pdev->device); + return ret; + } + + pci_set_master(pdev); + + sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR); + if (IS_ERR(sysctl_bar)) { + pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR); + return ret; + } + + dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR); + if (IS_ERR(dmareg_bar)) { + pr_err("failed to map BAR%u\n", QTN_DMA_BAR); + return ret; + } + + epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR); + if (IS_ERR(epmem_bar)) { + pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR); + return ret; + } + + chipid = qtnf_chip_id_get(sysctl_bar); + + pr_info("identified device: %s\n", qtnf_chipid_to_string(chipid)); + + switch (chipid) { + case QTN_CHIP_ID_PEARL: + case QTN_CHIP_ID_PEARL_B: + case QTN_CHIP_ID_PEARL_C: + bus = qtnf_pcie_pearl_alloc(pdev); + break; + case QTN_CHIP_ID_TOPAZ: + bus = qtnf_pcie_topaz_alloc(pdev); + break; + default: + pr_err("unsupported chip ID 0x%x\n", chipid); + return -ENOTSUPP; + } + if (!bus) return -ENOMEM; pcie_priv = get_bus_priv(bus); - pci_set_drvdata(pdev, bus); - bus->bus_ops = bus_ops; bus->dev = &pdev->dev; bus->fw_state = QTNF_FW_STATE_RESET; pcie_priv->pdev = pdev; pcie_priv->tx_stopped = 0; + 
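/* per-bus defaults below are seeded from the module parameters now declared at the top of pcie.c */ 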
pcie_priv->rx_bd_num = rx_bd_size_param; + pcie_priv->flashboot = flashboot; + + if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ) + pcie_priv->fw_blksize = QTN_PCIE_MAX_FW_BUFSZ; + else + pcie_priv->fw_blksize = fw_blksize_param; mutex_init(&bus->bus_lock); spin_lock_init(&pcie_priv->tx_lock); @@ -317,53 +368,35 @@ int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size, pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE"); if (!pcie_priv->workqueue) { pr_err("failed to alloc bus workqueue\n"); - ret = -ENODEV; - goto err_init; - } - - init_dummy_netdev(&bus->mux_dev); - - if (!pci_is_pcie(pdev)) { - pr_err("device %s is not PCI Express\n", pci_name(pdev)); - ret = -EIO; - goto err_base; - } - - qtnf_tune_pcie_mps(pcie_priv); - - ret = pcim_enable_device(pdev); - if (ret) { - pr_err("failed to init PCI device %x\n", pdev->device); - goto err_base; - } else { - pr_debug("successful init of PCI device %x\n", pdev->device); + return -ENODEV; } - ret = dma_set_mask_and_coherent(&pdev->dev, dma_mask); + ret = dma_set_mask_and_coherent(&pdev->dev, + pcie_priv->dma_mask_get_cb()); if (ret) { - pr_err("PCIE DMA coherent mask init failed\n"); - goto err_base; + pr_err("PCIE DMA coherent mask init failed 0x%llx\n", + pcie_priv->dma_mask_get_cb()); + goto error; } - pci_set_master(pdev); + init_dummy_netdev(&bus->mux_dev); qtnf_pcie_init_irq(pcie_priv, use_msi); - - ret = qtnf_pcie_init_memory(pcie_priv); - if (ret < 0) { - pr_err("PCIE memory init failed\n"); - goto err_base; - } - + pcie_priv->sysctl_bar = sysctl_bar; + pcie_priv->dmareg_bar = dmareg_bar; + pcie_priv->epmem_bar = epmem_bar; pci_save_state(pdev); + ret = pcie_priv->probe_cb(bus, tx_bd_size_param); + if (ret) + goto error; + + qtnf_pcie_bringup_fw_async(bus); return 0; -err_base: +error: flush_workqueue(pcie_priv->workqueue); destroy_workqueue(pcie_priv->workqueue); -err_init: pci_set_drvdata(pdev, NULL); - return ret; } @@ -373,8 +406,17 @@ static void qtnf_pcie_free_shm_ipc(struct qtnf_pcie_bus_priv *priv) qtnf_shm_ipc_free(&priv->shm_ipc_ep_out); } -void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv) +static void qtnf_pcie_remove(struct pci_dev *dev) { + struct qtnf_pcie_bus_priv *priv; + struct qtnf_bus *bus; + + bus = pci_get_drvdata(dev); + if (!bus) + return; + + priv = get_bus_priv(bus); + cancel_work_sync(&bus->fw_work); if (bus->fw_state == QTNF_FW_STATE_ACTIVE || @@ -388,5 +430,77 @@ void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv) qtnf_pcie_free_shm_ipc(priv); qtnf_debugfs_remove(bus); + priv->remove_cb(bus); pci_set_drvdata(priv->pdev, NULL); } + +#ifdef CONFIG_PM_SLEEP +static int qtnf_pcie_suspend(struct device *dev) +{ + struct qtnf_pcie_bus_priv *priv; + struct qtnf_bus *bus; + + bus = pci_get_drvdata(to_pci_dev(dev)); + if (!bus) + return -EFAULT; + + priv = get_bus_priv(bus); + return priv->suspend_cb(bus); +} + +static int qtnf_pcie_resume(struct device *dev) +{ + struct qtnf_pcie_bus_priv *priv; + struct qtnf_bus *bus; + + bus = pci_get_drvdata(to_pci_dev(dev)); + if (!bus) + return -EFAULT; + + priv = get_bus_priv(bus); + return priv->resume_cb(bus); +} + +/* Power Management Hooks */ +static SIMPLE_DEV_PM_OPS(qtnf_pcie_pm_ops, qtnf_pcie_suspend, + qtnf_pcie_resume); +#endif + +static const struct pci_device_id qtnf_pcie_devid_table[] = { + { + PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QSR, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, + }, + { }, +}; + +MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table); + +static struct pci_driver qtnf_pcie_drv_data = { + .name = 
DRV_NAME, + .id_table = qtnf_pcie_devid_table, + .probe = qtnf_pcie_probe, + .remove = qtnf_pcie_remove, +#ifdef CONFIG_PM_SLEEP + .driver = { + .pm = &qtnf_pcie_pm_ops, + }, +#endif +}; + +static int __init qtnf_pcie_register(void) +{ + return pci_register_driver(&qtnf_pcie_drv_data); +} + +static void __exit qtnf_pcie_exit(void) +{ + pci_unregister_driver(&qtnf_pcie_drv_data); +} + +module_init(qtnf_pcie_register); +module_exit(qtnf_pcie_exit); + +MODULE_AUTHOR("Quantenna Communications"); +MODULE_DESCRIPTION("Quantenna PCIe bus driver for 802.11 wireless LAN."); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h index 5c70fb4c0f92..bbc074e1f34d 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pcie_priv.h @@ -23,9 +23,14 @@ struct qtnf_pcie_bus_priv { struct pci_dev *pdev; + int (*probe_cb)(struct qtnf_bus *bus, unsigned int tx_bd_size); + void (*remove_cb)(struct qtnf_bus *bus); + int (*suspend_cb)(struct qtnf_bus *bus); + int (*resume_cb)(struct qtnf_bus *bus); + u64 (*dma_mask_get_cb)(void); + spinlock_t tx_reclaim_lock; spinlock_t tx_lock; - int mps; struct workqueue_struct *workqueue; struct tasklet_struct reclaim_tq; @@ -43,6 +48,8 @@ struct qtnf_pcie_bus_priv { struct sk_buff **tx_skb; struct sk_buff **rx_skb; + unsigned int fw_blksize; + u32 rx_bd_w_index; u32 rx_bd_r_index; @@ -58,21 +65,18 @@ struct qtnf_pcie_bus_priv { u8 msi_enabled; u8 tx_stopped; + bool flashboot; }; int qtnf_pcie_control_tx(struct qtnf_bus *bus, struct sk_buff *skb); int qtnf_pcie_alloc_skb_array(struct qtnf_pcie_bus_priv *priv); -void qtnf_pcie_bringup_fw_async(struct qtnf_bus *bus); -void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success, - const char *drv_name); +void qtnf_pcie_fw_boot_done(struct qtnf_bus *bus, bool boot_success); void qtnf_pcie_init_shm_ipc(struct qtnf_pcie_bus_priv *priv, struct qtnf_shm_ipc_region __iomem *ipc_tx_reg, struct qtnf_shm_ipc_region __iomem *ipc_rx_reg, const struct qtnf_shm_ipc_int *ipc_int); -int qtnf_pcie_probe(struct pci_dev *pdev, size_t priv_size, - const struct qtnf_bus_ops *bus_ops, u64 dma_mask, - bool use_msi); -void qtnf_pcie_remove(struct qtnf_bus *bus, struct qtnf_pcie_bus_priv *priv); +struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev); +struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev); static inline void qtnf_non_posted_write(u32 val, void __iomem *basereg) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c index 95c7b95c6f8a..1f5facbb8905 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/pearl_pcie.c @@ -2,7 +2,6 @@ /* Copyright (c) 2018 Quantenna Communications */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/firmware.h> #include <linux/pci.h> #include <linux/vmalloc.h> @@ -24,23 +23,7 @@ #include "shm_ipc.h" #include "debug.h" -static bool use_msi = true; -module_param(use_msi, bool, 0644); -MODULE_PARM_DESC(use_msi, "set 0 to use legacy interrupt"); - -static unsigned int tx_bd_size_param = 32; -module_param(tx_bd_size_param, uint, 0644); -MODULE_PARM_DESC(tx_bd_size_param, "Tx descriptors queue size, power of two"); - -static unsigned int rx_bd_size_param = 256; -module_param(rx_bd_size_param, uint, 0644); -MODULE_PARM_DESC(rx_bd_size_param, "Rx descriptors queue size, power of 
two"); - -static u8 flashboot = 1; -module_param(flashboot, byte, 0644); -MODULE_PARM_DESC(flashboot, "set to 0 to use FW binary file on FS"); - -#define DRV_NAME "qtnfmac_pearl_pcie" +#define PEARL_TX_BD_SIZE_DEFAULT 32 struct qtnf_pearl_bda { __le16 bda_len; @@ -415,30 +398,28 @@ static int pearl_hhbm_init(struct qtnf_pcie_pearl_state *ps) return 0; } -static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps) +static int qtnf_pcie_pearl_init_xfer(struct qtnf_pcie_pearl_state *ps, + unsigned int tx_bd_size) { struct qtnf_pcie_bus_priv *priv = &ps->base; int ret; u32 val; - priv->tx_bd_num = tx_bd_size_param; - priv->rx_bd_num = rx_bd_size_param; - priv->rx_bd_w_index = 0; - priv->rx_bd_r_index = 0; + if (tx_bd_size == 0) + tx_bd_size = PEARL_TX_BD_SIZE_DEFAULT; - if (!priv->tx_bd_num || !is_power_of_2(priv->tx_bd_num)) { - pr_err("tx_bd_size_param %u is not power of two\n", - priv->tx_bd_num); - return -EINVAL; - } + val = tx_bd_size * sizeof(struct qtnf_pearl_tx_bd); - val = priv->tx_bd_num * sizeof(struct qtnf_pearl_tx_bd); - if (val > PCIE_HHBM_MAX_SIZE) { - pr_err("tx_bd_size_param %u is too large\n", - priv->tx_bd_num); - return -EINVAL; + if (!is_power_of_2(tx_bd_size) || val > PCIE_HHBM_MAX_SIZE) { + pr_warn("bad tx_bd_size value %u\n", tx_bd_size); + priv->tx_bd_num = PEARL_TX_BD_SIZE_DEFAULT; + } else { + priv->tx_bd_num = tx_bd_size; } + priv->rx_bd_w_index = 0; + priv->rx_bd_r_index = 0; + if (!priv->rx_bd_num || !is_power_of_2(priv->rx_bd_num)) { pr_err("rx_bd_size_param %u is not power of two\n", priv->rx_bd_num); @@ -1006,7 +987,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work) const char *fwname = QTN_PCI_PEARL_FW_NAME; bool fw_boot_success = false; - if (flashboot) { + if (ps->base.flashboot) { state |= QTN_RC_FW_FLASHBOOT; } else { ret = request_firmware(&fw, fwname, &pdev->dev); @@ -1022,7 +1003,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work) QTN_FW_DL_TIMEOUT_MS)) { pr_err("card is not ready\n"); - if (!flashboot) + if (!ps->base.flashboot) release_firmware(fw); goto fw_load_exit; @@ -1030,7 +1011,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work) qtnf_clear_state(&ps->bda->bda_ep_state, QTN_EP_FW_LOADRDY); - if (flashboot) { + if (ps->base.flashboot) { pr_info("booting firmware from flash\n"); } else { @@ -1061,7 +1042,7 @@ static void qtnf_pearl_fw_work_handler(struct work_struct *work) fw_boot_success = true; fw_load_exit: - qtnf_pcie_fw_boot_done(bus, fw_boot_success, DRV_NAME); + qtnf_pcie_fw_boot_done(bus, fw_boot_success); if (fw_boot_success) { qtnf_debugfs_add_entry(bus, "hdp_stats", qtnf_dbg_hdp_stats); @@ -1077,74 +1058,34 @@ static void qtnf_pearl_reclaim_tasklet_fn(unsigned long data) qtnf_en_txdone_irq(ps); } -static int qtnf_pearl_check_chip_id(struct qtnf_pcie_pearl_state *ps) +static u64 qtnf_pearl_dma_mask_get(void) { - unsigned int chipid; - - chipid = qtnf_chip_id_get(ps->base.sysctl_bar); - - switch (chipid) { - case QTN_CHIP_ID_PEARL: - case QTN_CHIP_ID_PEARL_B: - case QTN_CHIP_ID_PEARL_C: - pr_info("chip ID is 0x%x\n", chipid); - break; - default: - pr_err("incorrect chip ID 0x%x\n", chipid); - return -ENODEV; - } - - return 0; +#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT + return DMA_BIT_MASK(64); +#else + return DMA_BIT_MASK(32); +#endif } -static int qtnf_pcie_pearl_probe(struct pci_dev *pdev, - const struct pci_device_id *id) +static int qtnf_pcie_pearl_probe(struct qtnf_bus *bus, unsigned int tx_bd_size) { struct qtnf_shm_ipc_int ipc_int; - struct qtnf_pcie_pearl_state *ps; - 
struct qtnf_bus *bus; + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); + struct pci_dev *pdev = ps->base.pdev; int ret; - u64 dma_mask; - -#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT - dma_mask = DMA_BIT_MASK(64); -#else - dma_mask = DMA_BIT_MASK(32); -#endif - - ret = qtnf_pcie_probe(pdev, sizeof(*ps), &qtnf_pcie_pearl_bus_ops, - dma_mask, use_msi); - if (ret) - return ret; - - bus = pci_get_drvdata(pdev); - ps = get_bus_priv(bus); + bus->bus_ops = &qtnf_pcie_pearl_bus_ops; spin_lock_init(&ps->irq_lock); - - tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn, - (unsigned long)ps); - netif_napi_add(&bus->mux_dev, &bus->mux_napi, - qtnf_pcie_pearl_rx_poll, 10); INIT_WORK(&bus->fw_work, qtnf_pearl_fw_work_handler); ps->pcie_reg_base = ps->base.dmareg_bar; ps->bda = ps->base.epmem_bar; writel(ps->base.msi_enabled, &ps->bda->bda_rc_msi_enabled); - ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int; - ipc_int.arg = ps; - qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1, - &ps->bda->bda_shm_reg2, &ipc_int); - - ret = qtnf_pearl_check_chip_id(ps); - if (ret) - goto error; - - ret = qtnf_pcie_pearl_init_xfer(ps); + ret = qtnf_pcie_pearl_init_xfer(ps, tx_bd_size); if (ret) { pr_err("PCIE xfer init failed\n"); - goto error; + return ret; } /* init default irq settings */ @@ -1155,95 +1096,63 @@ static int qtnf_pcie_pearl_probe(struct pci_dev *pdev, ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_pcie_pearl_interrupt, 0, - "qtnf_pcie_irq", (void *)bus); + "qtnf_pearl_irq", (void *)bus); if (ret) { pr_err("failed to request pcie irq %d\n", pdev->irq); - goto err_xfer; + qtnf_pearl_free_xfer_buffers(ps); + return ret; } - qtnf_pcie_bringup_fw_async(bus); - - return 0; + tasklet_init(&ps->base.reclaim_tq, qtnf_pearl_reclaim_tasklet_fn, + (unsigned long)ps); + netif_napi_add(&bus->mux_dev, &bus->mux_napi, + qtnf_pcie_pearl_rx_poll, 10); -err_xfer: - qtnf_pearl_free_xfer_buffers(ps); -error: - qtnf_pcie_remove(bus, &ps->base); + ipc_int.fn = qtnf_pcie_pearl_ipc_gen_ep_int; + ipc_int.arg = ps; + qtnf_pcie_init_shm_ipc(&ps->base, &ps->bda->bda_shm_reg1, + &ps->bda->bda_shm_reg2, &ipc_int); - return ret; + return 0; } -static void qtnf_pcie_pearl_remove(struct pci_dev *pdev) +static void qtnf_pcie_pearl_remove(struct qtnf_bus *bus) { - struct qtnf_pcie_pearl_state *ps; - struct qtnf_bus *bus; - - bus = pci_get_drvdata(pdev); - if (!bus) - return; - - ps = get_bus_priv(bus); + struct qtnf_pcie_pearl_state *ps = get_bus_priv(bus); - qtnf_pcie_remove(bus, &ps->base); qtnf_pearl_reset_ep(ps); qtnf_pearl_free_xfer_buffers(ps); } #ifdef CONFIG_PM_SLEEP -static int qtnf_pcie_pearl_suspend(struct device *dev) +static int qtnf_pcie_pearl_suspend(struct qtnf_bus *bus) { return -EOPNOTSUPP; } -static int qtnf_pcie_pearl_resume(struct device *dev) +static int qtnf_pcie_pearl_resume(struct qtnf_bus *bus) { return 0; } -#endif /* CONFIG_PM_SLEEP */ - -#ifdef CONFIG_PM_SLEEP -/* Power Management Hooks */ -static SIMPLE_DEV_PM_OPS(qtnf_pcie_pearl_pm_ops, qtnf_pcie_pearl_suspend, - qtnf_pcie_pearl_resume); #endif -static const struct pci_device_id qtnf_pcie_devid_table[] = { - { - PCIE_VENDOR_ID_QUANTENNA, PCIE_DEVICE_ID_QTN_PEARL, - PCI_ANY_ID, PCI_ANY_ID, 0, 0, - }, - { }, -}; +struct qtnf_bus *qtnf_pcie_pearl_alloc(struct pci_dev *pdev) +{ + struct qtnf_bus *bus; + struct qtnf_pcie_pearl_state *ps; -MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table); + bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ps), GFP_KERNEL); + if (!bus) + return NULL; -static struct pci_driver qtnf_pcie_pearl_drv_data = { - 
.name = DRV_NAME, - .id_table = qtnf_pcie_devid_table, - .probe = qtnf_pcie_pearl_probe, - .remove = qtnf_pcie_pearl_remove, + ps = get_bus_priv(bus); + ps->base.probe_cb = qtnf_pcie_pearl_probe; + ps->base.remove_cb = qtnf_pcie_pearl_remove; + ps->base.dma_mask_get_cb = qtnf_pearl_dma_mask_get; #ifdef CONFIG_PM_SLEEP - .driver = { - .pm = &qtnf_pcie_pearl_pm_ops, - }, + ps->base.resume_cb = qtnf_pcie_pearl_resume; + ps->base.suspend_cb = qtnf_pcie_pearl_suspend; #endif -}; - -static int __init qtnf_pcie_pearl_register(void) -{ - pr_info("register Quantenna QSR10g FullMAC PCIE driver\n"); - return pci_register_driver(&qtnf_pcie_pearl_drv_data); -} -static void __exit qtnf_pcie_pearl_exit(void) -{ - pr_info("unregister Quantenna QSR10g FullMAC PCIE driver\n"); - pci_unregister_driver(&qtnf_pcie_pearl_drv_data); + return bus; } - -module_init(qtnf_pcie_pearl_register); -module_exit(qtnf_pcie_pearl_exit); - -MODULE_AUTHOR("Quantenna Communications"); -MODULE_DESCRIPTION("Quantenna QSR10g PCIe bus driver for 802.11 wireless LAN."); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c new file mode 100644 index 000000000000..598edb814421 --- /dev/null +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie.c @@ -0,0 +1,1219 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* Copyright (c) 2018 Quantenna Communications */ + +#include <linux/kernel.h> +#include <linux/firmware.h> +#include <linux/pci.h> +#include <linux/vmalloc.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/sched.h> +#include <linux/crc32.h> +#include <linux/completion.h> +#include <linux/spinlock.h> +#include <linux/circ_buf.h> + +#include "pcie_priv.h" +#include "topaz_pcie_regs.h" +#include "topaz_pcie_ipc.h" +#include "qtn_hw_ids.h" +#include "core.h" +#include "bus.h" +#include "shm_ipc.h" +#include "debug.h" + +#define TOPAZ_TX_BD_SIZE_DEFAULT 128 + +struct qtnf_topaz_tx_bd { + __le32 addr; + __le32 info; +} __packed; + +struct qtnf_topaz_rx_bd { + __le32 addr; + __le32 info; +} __packed; + +struct qtnf_extra_bd_params { + __le32 param1; + __le32 param2; + __le32 param3; + __le32 param4; +} __packed; + +#define QTNF_BD_PARAM_OFFSET(n) offsetof(struct qtnf_extra_bd_params, param##n) + +struct vmac_pkt_info { + __le32 addr; + __le32 info; +}; + +struct qtnf_topaz_bda { + __le16 bda_len; + __le16 bda_version; + __le32 bda_bootstate; + __le32 bda_dma_mask; + __le32 bda_dma_offset; + __le32 bda_flags; + __le32 bda_img; + __le32 bda_img_size; + __le32 bda_ep2h_irqstatus; + __le32 bda_h2ep_irqstatus; + __le32 bda_msi_addr; + u8 reserved1[56]; + __le32 bda_flashsz; + u8 bda_boardname[PCIE_BDA_NAMELEN]; + __le32 bda_pci_pre_status; + __le32 bda_pci_endian; + __le32 bda_pci_post_status; + __le32 bda_h2ep_txd_budget; + __le32 bda_ep2h_txd_budget; + __le32 bda_rc_rx_bd_base; + __le32 bda_rc_rx_bd_num; + __le32 bda_rc_tx_bd_base; + __le32 bda_rc_tx_bd_num; + u8 bda_ep_link_state; + u8 bda_rc_link_state; + u8 bda_rc_msi_enabled; + u8 reserved2; + __le32 bda_ep_next_pkt; + struct vmac_pkt_info request[QTN_PCIE_RC_TX_QUEUE_LEN]; + struct qtnf_shm_ipc_region bda_shm_reg1 __aligned(4096); + struct qtnf_shm_ipc_region bda_shm_reg2 __aligned(4096); +} __packed; + +struct qtnf_pcie_topaz_state { + struct qtnf_pcie_bus_priv base; + struct qtnf_topaz_bda __iomem *bda; + + dma_addr_t dma_msi_dummy; + u32 dma_msi_imwr; + + struct qtnf_topaz_tx_bd *tx_bd_vbase; + struct qtnf_topaz_rx_bd *rx_bd_vbase; + + __le32 __iomem 
*ep_next_rx_pkt; + __le32 __iomem *txqueue_wake; + __le32 __iomem *ep_pmstate; + + unsigned long rx_pkt_count; +}; + +static void qtnf_deassert_intx(struct qtnf_pcie_topaz_state *ts) +{ + void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET; + u32 cfg; + + cfg = readl(reg); + cfg &= ~TOPAZ_ASSERT_INTX; + qtnf_non_posted_write(cfg, reg); +} + +static inline int qtnf_topaz_intx_asserted(struct qtnf_pcie_topaz_state *ts) +{ + void __iomem *reg = ts->base.sysctl_bar + TOPAZ_PCIE_CFG0_OFFSET; + u32 cfg = readl(reg); + + return !!(cfg & TOPAZ_ASSERT_INTX); +} + +static void qtnf_topaz_reset_ep(struct qtnf_pcie_topaz_state *ts) +{ + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RST_EP_IRQ), + TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar)); + msleep(QTN_EP_RESET_WAIT_MS); + pci_restore_state(ts->base.pdev); +} + +static void setup_rx_irqs(struct qtnf_pcie_topaz_state *ts) +{ + void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar); + + ts->dma_msi_imwr = readl(reg); +} + +static void enable_rx_irqs(struct qtnf_pcie_topaz_state *ts) +{ + void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar); + + qtnf_non_posted_write(ts->dma_msi_imwr, reg); +} + +static void disable_rx_irqs(struct qtnf_pcie_topaz_state *ts) +{ + void __iomem *reg = PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(ts->base.dmareg_bar); + + qtnf_non_posted_write(QTN_HOST_LO32(ts->dma_msi_dummy), reg); +} + +static void qtnf_topaz_ipc_gen_ep_int(void *arg) +{ + struct qtnf_pcie_topaz_state *ts = arg; + + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_CTRL_IRQ), + TOPAZ_CTL_M2L_INT(ts->base.sysctl_bar)); +} + +static int qtnf_is_state(__le32 __iomem *reg, u32 state) +{ + u32 s = readl(reg); + + return (s == state); +} + +static void qtnf_set_state(__le32 __iomem *reg, u32 state) +{ + qtnf_non_posted_write(state, reg); +} + +static int qtnf_poll_state(__le32 __iomem *reg, u32 state, u32 delay_in_ms) +{ + u32 timeout = 0; + + while ((qtnf_is_state(reg, state) == 0)) { + usleep_range(1000, 1200); + if (++timeout > delay_in_ms) + return -1; + } + + return 0; +} + +static int topaz_alloc_bd_table(struct qtnf_pcie_topaz_state *ts, + struct qtnf_topaz_bda __iomem *bda) +{ + struct qtnf_extra_bd_params __iomem *extra_params; + struct qtnf_pcie_bus_priv *priv = &ts->base; + dma_addr_t paddr; + void *vaddr; + int len; + int i; + + /* bd table */ + + len = priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd) + + priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd) + + sizeof(struct qtnf_extra_bd_params); + + vaddr = dmam_alloc_coherent(&priv->pdev->dev, len, &paddr, GFP_KERNEL); + if (!vaddr) + return -ENOMEM; + + memset(vaddr, 0, len); + + /* tx bd */ + + ts->tx_bd_vbase = vaddr; + qtnf_non_posted_write(paddr, &bda->bda_rc_tx_bd_base); + + for (i = 0; i < priv->tx_bd_num; i++) + ts->tx_bd_vbase[i].info |= cpu_to_le32(QTN_BD_EMPTY); + + pr_debug("TX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); + + priv->tx_bd_r_index = 0; + priv->tx_bd_w_index = 0; + + /* rx bd */ + + vaddr = ((struct qtnf_topaz_tx_bd *)vaddr) + priv->tx_bd_num; + paddr += priv->tx_bd_num * sizeof(struct qtnf_topaz_tx_bd); + + ts->rx_bd_vbase = vaddr; + qtnf_non_posted_write(paddr, &bda->bda_rc_rx_bd_base); + + pr_debug("RX descriptor table: vaddr=0x%p paddr=%pad\n", vaddr, &paddr); + + /* extra shared params */ + + vaddr = ((struct qtnf_topaz_rx_bd *)vaddr) + priv->rx_bd_num; + paddr += priv->rx_bd_num * sizeof(struct qtnf_topaz_rx_bd); + + extra_params = (struct qtnf_extra_bd_params __iomem *)vaddr; + + ts->ep_next_rx_pkt = &extra_params->param1; + 
qtnf_non_posted_write(paddr + QTNF_BD_PARAM_OFFSET(1), + &bda->bda_ep_next_pkt); + ts->txqueue_wake = &extra_params->param2; + ts->ep_pmstate = &extra_params->param3; + ts->dma_msi_dummy = paddr + QTNF_BD_PARAM_OFFSET(4); + + return 0; +} + +static int +topaz_skb2rbd_attach(struct qtnf_pcie_topaz_state *ts, u16 index, u32 wrap) +{ + struct qtnf_topaz_rx_bd *rxbd = &ts->rx_bd_vbase[index]; + struct sk_buff *skb; + dma_addr_t paddr; + + skb = __netdev_alloc_skb_ip_align(NULL, SKB_BUF_SIZE, GFP_ATOMIC); + if (!skb) { + ts->base.rx_skb[index] = NULL; + return -ENOMEM; + } + + ts->base.rx_skb[index] = skb; + + paddr = pci_map_single(ts->base.pdev, skb->data, + SKB_BUF_SIZE, PCI_DMA_FROMDEVICE); + if (pci_dma_mapping_error(ts->base.pdev, paddr)) { + pr_err("skb mapping error: %pad\n", &paddr); + return -ENOMEM; + } + + rxbd->addr = cpu_to_le32(QTN_HOST_LO32(paddr)); + rxbd->info = cpu_to_le32(QTN_BD_EMPTY | wrap); + + ts->base.rx_bd_w_index = index; + + return 0; +} + +static int topaz_alloc_rx_buffers(struct qtnf_pcie_topaz_state *ts) +{ + u16 i; + int ret = 0; + + memset(ts->rx_bd_vbase, 0x0, + ts->base.rx_bd_num * sizeof(struct qtnf_topaz_rx_bd)); + + for (i = 0; i < ts->base.rx_bd_num; i++) { + ret = topaz_skb2rbd_attach(ts, i, 0); + if (ret) + break; + } + + ts->rx_bd_vbase[ts->base.rx_bd_num - 1].info |= + cpu_to_le32(QTN_BD_WRAP); + + return ret; +} + +/* all rx/tx activity should have ceased before calling this function */ +static void qtnf_topaz_free_xfer_buffers(struct qtnf_pcie_topaz_state *ts) +{ + struct qtnf_pcie_bus_priv *priv = &ts->base; + struct qtnf_topaz_rx_bd *rxbd; + struct qtnf_topaz_tx_bd *txbd; + struct sk_buff *skb; + dma_addr_t paddr; + int i; + + /* free rx buffers */ + for (i = 0; i < priv->rx_bd_num; i++) { + if (priv->rx_skb && priv->rx_skb[i]) { + rxbd = &ts->rx_bd_vbase[i]; + skb = priv->rx_skb[i]; + paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr)); + pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, + PCI_DMA_FROMDEVICE); + dev_kfree_skb_any(skb); + priv->rx_skb[i] = NULL; + rxbd->addr = 0; + rxbd->info = 0; + } + } + + /* free tx buffers */ + for (i = 0; i < priv->tx_bd_num; i++) { + if (priv->tx_skb && priv->tx_skb[i]) { + txbd = &ts->tx_bd_vbase[i]; + skb = priv->tx_skb[i]; + paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, SKB_BUF_SIZE, + PCI_DMA_TODEVICE); + dev_kfree_skb_any(skb); + priv->tx_skb[i] = NULL; + txbd->addr = 0; + txbd->info = 0; + } + } +} + +static int qtnf_pcie_topaz_init_xfer(struct qtnf_pcie_topaz_state *ts, + unsigned int tx_bd_size) +{ + struct qtnf_topaz_bda __iomem *bda = ts->bda; + struct qtnf_pcie_bus_priv *priv = &ts->base; + int ret; + + if (tx_bd_size == 0) + tx_bd_size = TOPAZ_TX_BD_SIZE_DEFAULT; + + /* check TX BD queue max length according to struct qtnf_topaz_bda */ + if (tx_bd_size > QTN_PCIE_RC_TX_QUEUE_LEN) { + pr_warn("TX BD queue cannot exceed %d\n", + QTN_PCIE_RC_TX_QUEUE_LEN); + tx_bd_size = QTN_PCIE_RC_TX_QUEUE_LEN; + } + + priv->tx_bd_num = tx_bd_size; + qtnf_non_posted_write(priv->tx_bd_num, &bda->bda_rc_tx_bd_num); + qtnf_non_posted_write(priv->rx_bd_num, &bda->bda_rc_rx_bd_num); + + priv->rx_bd_w_index = 0; + priv->rx_bd_r_index = 0; + + ret = qtnf_pcie_alloc_skb_array(priv); + if (ret) { + pr_err("failed to allocate skb array\n"); + return ret; + } + + ret = topaz_alloc_bd_table(ts, bda); + if (ret) { + pr_err("failed to allocate bd table\n"); + return ret; + } + + ret = topaz_alloc_rx_buffers(ts); + if (ret) { + pr_err("failed to allocate rx buffers\n"); + return ret; 
+ } + + return ret; +} + +static void qtnf_topaz_data_tx_reclaim(struct qtnf_pcie_topaz_state *ts) +{ + struct qtnf_pcie_bus_priv *priv = &ts->base; + struct qtnf_topaz_tx_bd *txbd; + struct sk_buff *skb; + unsigned long flags; + dma_addr_t paddr; + u32 tx_done_index; + int count = 0; + int i; + + spin_lock_irqsave(&priv->tx_reclaim_lock, flags); + + tx_done_index = readl(ts->ep_next_rx_pkt); + i = priv->tx_bd_r_index; + + if (CIRC_CNT(priv->tx_bd_w_index, tx_done_index, priv->tx_bd_num)) + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ), + TOPAZ_LH_IPC4_INT(priv->sysctl_bar)); + + while (CIRC_CNT(tx_done_index, i, priv->tx_bd_num)) { + skb = priv->tx_skb[i]; + + if (likely(skb)) { + txbd = &ts->tx_bd_vbase[i]; + paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(txbd->addr)); + pci_unmap_single(priv->pdev, paddr, skb->len, + PCI_DMA_TODEVICE); + + if (skb->dev) { + qtnf_update_tx_stats(skb->dev, skb); + if (unlikely(priv->tx_stopped)) { + qtnf_wake_all_queues(skb->dev); + priv->tx_stopped = 0; + } + } + + dev_kfree_skb_any(skb); + } + + priv->tx_skb[i] = NULL; + count++; + + if (++i >= priv->tx_bd_num) + i = 0; + } + + priv->tx_reclaim_done += count; + priv->tx_reclaim_req++; + priv->tx_bd_r_index = i; + + spin_unlock_irqrestore(&priv->tx_reclaim_lock, flags); +} + +static void qtnf_try_stop_xmit(struct qtnf_bus *bus, struct net_device *ndev) +{ + struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); + + if (ndev) { + netif_tx_stop_all_queues(ndev); + ts->base.tx_stopped = 1; + } + + writel(0x0, ts->txqueue_wake); + + /* sync up tx queue status before generating interrupt */ + dma_wmb(); + + /* send irq to card: tx stopped */ + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ), + TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar)); + + /* schedule reclaim attempt */ + tasklet_hi_schedule(&ts->base.reclaim_tq); +} + +static void qtnf_try_wake_xmit(struct qtnf_bus *bus, struct net_device *ndev) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + int ready; + + ready = readl(ts->txqueue_wake); + if (ready) { + netif_wake_queue(ndev); + } else { + /* re-send irq to card: tx stopped */ + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_STOP_IRQ), + TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar)); + } +} + +static int qtnf_tx_queue_ready(struct qtnf_pcie_topaz_state *ts) +{ + struct qtnf_pcie_bus_priv *priv = &ts->base; + + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { + qtnf_topaz_data_tx_reclaim(ts); + + if (!CIRC_SPACE(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)) { + priv->tx_full_count++; + return 0; + } + } + + return 1; +} + +static int qtnf_pcie_data_tx(struct qtnf_bus *bus, struct sk_buff *skb) +{ + struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ts->base; + struct qtnf_topaz_bda __iomem *bda = ts->bda; + struct qtnf_topaz_tx_bd *txbd; + dma_addr_t skb_paddr; + unsigned long flags; + int ret = 0; + int len; + int i; + + spin_lock_irqsave(&priv->tx_lock, flags); + + if (!qtnf_tx_queue_ready(ts)) { + qtnf_try_stop_xmit(bus, skb->dev); + spin_unlock_irqrestore(&priv->tx_lock, flags); + return NETDEV_TX_BUSY; + } + + i = priv->tx_bd_w_index; + priv->tx_skb[i] = skb; + len = skb->len; + + skb_paddr = pci_map_single(priv->pdev, skb->data, + skb->len, PCI_DMA_TODEVICE); + if (pci_dma_mapping_error(priv->pdev, skb_paddr)) { + ret = -ENOMEM; + goto tx_done; + } + + txbd = &ts->tx_bd_vbase[i]; + txbd->addr = cpu_to_le32(QTN_HOST_LO32(skb_paddr)); + + writel(QTN_HOST_LO32(skb_paddr), &bda->request[i].addr); + writel(len | 
QTN_PCIE_TX_VALID_PKT, &bda->request[i].info); + + /* sync up descriptor updates before generating interrupt */ + dma_wmb(); + + /* generate irq to card: tx done */ + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_TX_DONE_IRQ), + TOPAZ_LH_IPC4_INT(priv->sysctl_bar)); + + if (++i >= priv->tx_bd_num) + i = 0; + + priv->tx_bd_w_index = i; + +tx_done: + if (ret) { + if (skb->dev) + skb->dev->stats.tx_dropped++; + dev_kfree_skb_any(skb); + } + + priv->tx_done_count++; + spin_unlock_irqrestore(&priv->tx_lock, flags); + + qtnf_topaz_data_tx_reclaim(ts); + + return NETDEV_TX_OK; +} + +static irqreturn_t qtnf_pcie_topaz_interrupt(int irq, void *data) +{ + struct qtnf_bus *bus = (struct qtnf_bus *)data; + struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ts->base; + + if (!priv->msi_enabled && !qtnf_topaz_intx_asserted(ts)) + return IRQ_NONE; + + priv->pcie_irq_count++; + + qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_in); + qtnf_shm_ipc_irq_handler(&priv->shm_ipc_ep_out); + + if (napi_schedule_prep(&bus->mux_napi)) { + disable_rx_irqs(ts); + __napi_schedule(&bus->mux_napi); + } + + tasklet_hi_schedule(&priv->reclaim_tq); + + if (!priv->msi_enabled) + qtnf_deassert_intx(ts); + + return IRQ_HANDLED; +} + +static int qtnf_rx_data_ready(struct qtnf_pcie_topaz_state *ts) +{ + u16 index = ts->base.rx_bd_r_index; + struct qtnf_topaz_rx_bd *rxbd; + u32 descw; + + rxbd = &ts->rx_bd_vbase[index]; + descw = le32_to_cpu(rxbd->info); + + if (descw & QTN_BD_EMPTY) + return 0; + + return 1; +} + +static int qtnf_topaz_rx_poll(struct napi_struct *napi, int budget) +{ + struct qtnf_bus *bus = container_of(napi, struct qtnf_bus, mux_napi); + struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ts->base; + struct net_device *ndev = NULL; + struct sk_buff *skb = NULL; + int processed = 0; + struct qtnf_topaz_rx_bd *rxbd; + dma_addr_t skb_paddr; + int consume; + u32 descw; + u32 poffset; + u32 psize; + u16 r_idx; + u16 w_idx; + int ret; + + while (processed < budget) { + if (!qtnf_rx_data_ready(ts)) + goto rx_out; + + r_idx = priv->rx_bd_r_index; + rxbd = &ts->rx_bd_vbase[r_idx]; + descw = le32_to_cpu(rxbd->info); + + skb = priv->rx_skb[r_idx]; + poffset = QTN_GET_OFFSET(descw); + psize = QTN_GET_LEN(descw); + consume = 1; + + if (descw & QTN_BD_EMPTY) { + pr_warn("skip invalid rxbd[%d]\n", r_idx); + consume = 0; + } + + if (!skb) { + pr_warn("skip missing rx_skb[%d]\n", r_idx); + consume = 0; + } + + if (skb && (skb_tailroom(skb) < psize)) { + pr_err("skip packet with invalid length: %u > %u\n", + psize, skb_tailroom(skb)); + consume = 0; + } + + if (skb) { + skb_paddr = QTN_HOST_ADDR(0x0, le32_to_cpu(rxbd->addr)); + pci_unmap_single(priv->pdev, skb_paddr, SKB_BUF_SIZE, + PCI_DMA_FROMDEVICE); + } + + if (consume) { + skb_reserve(skb, poffset); + skb_put(skb, psize); + ndev = qtnf_classify_skb(bus, skb); + if (likely(ndev)) { + qtnf_update_rx_stats(ndev, skb); + skb->protocol = eth_type_trans(skb, ndev); + netif_receive_skb(skb); + } else { + pr_debug("drop untagged skb\n"); + bus->mux_dev.stats.rx_dropped++; + dev_kfree_skb_any(skb); + } + } else { + if (skb) { + bus->mux_dev.stats.rx_dropped++; + dev_kfree_skb_any(skb); + } + } + + /* notify card about recv packets once per several packets */ + if (((++ts->rx_pkt_count) & RX_DONE_INTR_MSK) == 0) + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_RX_DONE_IRQ), + TOPAZ_LH_IPC4_INT(priv->sysctl_bar)); + + priv->rx_skb[r_idx] = NULL; + if (++r_idx >= priv->rx_bd_num) + r_idx = 0; + + priv->rx_bd_r_index = 
r_idx; + + /* replace processed buffer by a new one */ + w_idx = priv->rx_bd_w_index; + while (CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num) > 0) { + if (++w_idx >= priv->rx_bd_num) + w_idx = 0; + + ret = topaz_skb2rbd_attach(ts, w_idx, + descw & QTN_BD_WRAP); + if (ret) { + pr_err("failed to allocate new rx_skb[%d]\n", + w_idx); + break; + } + } + + processed++; + } + +rx_out: + if (processed < budget) { + napi_complete(napi); + enable_rx_irqs(ts); + } + + return processed; +} + +static void +qtnf_pcie_data_tx_timeout(struct qtnf_bus *bus, struct net_device *ndev) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + + qtnf_try_wake_xmit(bus, ndev); + tasklet_hi_schedule(&ts->base.reclaim_tq); +} + +static void qtnf_pcie_data_rx_start(struct qtnf_bus *bus) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + + napi_enable(&bus->mux_napi); + enable_rx_irqs(ts); +} + +static void qtnf_pcie_data_rx_stop(struct qtnf_bus *bus) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + + disable_rx_irqs(ts); + napi_disable(&bus->mux_napi); +} + +static const struct qtnf_bus_ops qtnf_pcie_topaz_bus_ops = { + /* control path methods */ + .control_tx = qtnf_pcie_control_tx, + + /* data path methods */ + .data_tx = qtnf_pcie_data_tx, + .data_tx_timeout = qtnf_pcie_data_tx_timeout, + .data_rx_start = qtnf_pcie_data_rx_start, + .data_rx_stop = qtnf_pcie_data_rx_stop, +}; + +static int qtnf_dbg_irq_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + + seq_printf(s, "pcie_irq_count(%u)\n", ts->base.pcie_irq_count); + + return 0; +} + +static int qtnf_dbg_pkt_stats(struct seq_file *s, void *data) +{ + struct qtnf_bus *bus = dev_get_drvdata(s->private); + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + struct qtnf_pcie_bus_priv *priv = &ts->base; + u32 tx_done_index = readl(ts->ep_next_rx_pkt); + + seq_printf(s, "tx_full_count(%u)\n", priv->tx_full_count); + seq_printf(s, "tx_done_count(%u)\n", priv->tx_done_count); + seq_printf(s, "tx_reclaim_done(%u)\n", priv->tx_reclaim_done); + seq_printf(s, "tx_reclaim_req(%u)\n", priv->tx_reclaim_req); + + seq_printf(s, "tx_bd_r_index(%u)\n", priv->tx_bd_r_index); + seq_printf(s, "tx_done_index(%u)\n", tx_done_index); + seq_printf(s, "tx_bd_w_index(%u)\n", priv->tx_bd_w_index); + + seq_printf(s, "tx host queue len(%u)\n", + CIRC_CNT(priv->tx_bd_w_index, priv->tx_bd_r_index, + priv->tx_bd_num)); + seq_printf(s, "tx reclaim queue len(%u)\n", + CIRC_CNT(tx_done_index, priv->tx_bd_r_index, + priv->tx_bd_num)); + seq_printf(s, "tx card queue len(%u)\n", + CIRC_CNT(priv->tx_bd_w_index, tx_done_index, + priv->tx_bd_num)); + + seq_printf(s, "rx_bd_r_index(%u)\n", priv->rx_bd_r_index); + seq_printf(s, "rx_bd_w_index(%u)\n", priv->rx_bd_w_index); + seq_printf(s, "rx alloc queue len(%u)\n", + CIRC_SPACE(priv->rx_bd_w_index, priv->rx_bd_r_index, + priv->rx_bd_num)); + + return 0; +} + +static void qtnf_reset_dma_offset(struct qtnf_pcie_topaz_state *ts) +{ + struct qtnf_topaz_bda __iomem *bda = ts->bda; + u32 offset = readl(&bda->bda_dma_offset); + + if ((offset & PCIE_DMA_OFFSET_ERROR_MASK) != PCIE_DMA_OFFSET_ERROR) + return; + + writel(0x0, &bda->bda_dma_offset); +} + +static int qtnf_pcie_endian_detect(struct qtnf_pcie_topaz_state *ts) +{ + struct qtnf_topaz_bda __iomem *bda = ts->bda; + u32 timeout = 0; + u32 endian; + int ret = 0; + + writel(QTN_PCI_ENDIAN_DETECT_DATA, &bda->bda_pci_endian); + + /* flush endian 
modifications before status update */ + dma_wmb(); + + writel(QTN_PCI_ENDIAN_VALID_STATUS, &bda->bda_pci_pre_status); + + while (readl(&bda->bda_pci_post_status) != + QTN_PCI_ENDIAN_VALID_STATUS) { + usleep_range(1000, 1200); + if (++timeout > QTN_FW_DL_TIMEOUT_MS) { + pr_err("card endianness detection timed out\n"); + ret = -ETIMEDOUT; + goto endian_out; + } + } + + /* do not read before status is updated */ + dma_rmb(); + + endian = readl(&bda->bda_pci_endian); + WARN(endian != QTN_PCI_LITTLE_ENDIAN, + "%s: unexpected card endianness", __func__); + +endian_out: + writel(0, &bda->bda_pci_pre_status); + writel(0, &bda->bda_pci_post_status); + writel(0, &bda->bda_pci_endian); + + return ret; +} + +static int qtnf_pre_init_ep(struct qtnf_bus *bus) +{ + struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); + struct qtnf_topaz_bda __iomem *bda = ts->bda; + u32 flags; + int ret; + + ret = qtnf_pcie_endian_detect(ts); + if (ret < 0) { + pr_err("failed to detect card endianness\n"); + return ret; + } + + writeb(ts->base.msi_enabled, &ts->bda->bda_rc_msi_enabled); + qtnf_reset_dma_offset(ts); + + /* notify card about driver type and boot mode */ + flags = readl(&bda->bda_flags) | QTN_BDA_HOST_QLINK_DRV; + + if (ts->base.flashboot) + flags |= QTN_BDA_FLASH_BOOT; + else + flags &= ~QTN_BDA_FLASH_BOOT; + + writel(flags, &bda->bda_flags); + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_RDY); + if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_RDY, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("card is not ready to boot...\n"); + return -ETIMEDOUT; + } + + return ret; +} + +static int qtnf_post_init_ep(struct qtnf_pcie_topaz_state *ts) +{ + struct pci_dev *pdev = ts->base.pdev; + + setup_rx_irqs(ts); + disable_rx_irqs(ts); + + if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_QLINK_DONE, + QTN_FW_QLINK_TIMEOUT_MS)) + return -ETIMEDOUT; + + enable_irq(pdev->irq); + return 0; +} + +static int +qtnf_ep_fw_load(struct qtnf_pcie_topaz_state *ts, const u8 *fw, u32 fw_size) +{ + struct qtnf_topaz_bda __iomem *bda = ts->bda; + struct pci_dev *pdev = ts->base.pdev; + u32 remaining = fw_size; + u8 *curr = (u8 *)fw; + u32 blksize; + u32 nblocks; + u32 offset; + u32 count; + u32 size; + dma_addr_t paddr; + void *data; + int ret = 0; + + pr_debug("FW upload started: fw_addr = 0x%p, size=%d\n", fw, fw_size); + + blksize = ts->base.fw_blksize; + + if (blksize < PAGE_SIZE) + blksize = PAGE_SIZE; + + while (blksize >= PAGE_SIZE) { + pr_debug("allocating %u bytes to upload FW\n", blksize); + data = dma_alloc_coherent(&pdev->dev, blksize, + &paddr, GFP_KERNEL); + if (data) + break; + blksize /= 2; + } + + if (!data) { + pr_err("failed to allocate DMA buffer for FW upload\n"); + ret = -ENOMEM; + goto fw_load_out; + } + + nblocks = NBLOCKS(fw_size, blksize); + offset = readl(&bda->bda_dma_offset); + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_HOST_LOAD); + if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_EP_RDY, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("card is not ready to download FW\n"); + ret = -ETIMEDOUT; + goto fw_load_map; + } + + for (count = 0 ; count < nblocks; count++) { + size = (remaining > blksize) ? 
blksize : remaining; + + memcpy(data, curr, size); + qtnf_non_posted_write(paddr + offset, &bda->bda_img); + qtnf_non_posted_write(size, &bda->bda_img_size); + + pr_debug("chunk[%u] VA[0x%p] PA[%pad] sz[%u]\n", + count, (void *)curr, &paddr, size); + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY); + if (qtnf_poll_state(&ts->bda->bda_bootstate, + QTN_BDA_FW_BLOCK_DONE, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("confirmation for block #%d timed out\n", count); + ret = -ETIMEDOUT; + goto fw_load_map; + } + + remaining = (remaining < size) ? remaining : (remaining - size); + curr += size; + } + + /* upload completion mark: zero-sized block */ + qtnf_non_posted_write(0, &bda->bda_img); + qtnf_non_posted_write(0, &bda->bda_img_size); + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_RDY); + if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_DONE, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("confirmation for the last block timed out\n"); + ret = -ETIMEDOUT; + goto fw_load_map; + } + + /* RC is done */ + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_BLOCK_END); + if (qtnf_poll_state(&ts->bda->bda_bootstate, QTN_BDA_FW_LOAD_DONE, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("confirmation for FW upload completion timed out\n"); + ret = -ETIMEDOUT; + goto fw_load_map; + } + + pr_debug("FW upload completed: totally sent %d blocks\n", count); + +fw_load_map: + dma_free_coherent(&pdev->dev, blksize, data, paddr); + +fw_load_out: + return ret; +} + +static int qtnf_topaz_fw_upload(struct qtnf_pcie_topaz_state *ts, + const char *fwname) +{ + const struct firmware *fw; + struct pci_dev *pdev = ts->base.pdev; + int ret; + + if (qtnf_poll_state(&ts->bda->bda_bootstate, + QTN_BDA_FW_LOAD_RDY, + QTN_FW_DL_TIMEOUT_MS)) { + pr_err("%s: card is not ready\n", fwname); + return -1; + } + + pr_info("starting firmware upload: %s\n", fwname); + + ret = request_firmware(&fw, fwname, &pdev->dev); + if (ret < 0) { + pr_err("%s: request_firmware error %d\n", fwname, ret); + return -1; + } + + ret = qtnf_ep_fw_load(ts, fw->data, fw->size); + release_firmware(fw); + + if (ret) + pr_err("%s: FW upload error\n", fwname); + + return ret; +} + +static void qtnf_topaz_fw_work_handler(struct work_struct *work) +{ + struct qtnf_bus *bus = container_of(work, struct qtnf_bus, fw_work); + struct qtnf_pcie_topaz_state *ts = (void *)get_bus_priv(bus); + int ret; + int bootloader_needed = readl(&ts->bda->bda_flags) & QTN_BDA_XMIT_UBOOT; + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_TARGET_BOOT); + + if (bootloader_needed) { + ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_BOOTLD_NAME); + if (ret) + goto fw_load_exit; + + ret = qtnf_pre_init_ep(bus); + if (ret) + goto fw_load_exit; + + qtnf_set_state(&ts->bda->bda_bootstate, + QTN_BDA_FW_TARGET_BOOT); + } + + if (ts->base.flashboot) { + pr_info("booting firmware from flash\n"); + + ret = qtnf_poll_state(&ts->bda->bda_bootstate, + QTN_BDA_FW_FLASH_BOOT, + QTN_FW_DL_TIMEOUT_MS); + if (ret) + goto fw_load_exit; + } else { + ret = qtnf_topaz_fw_upload(ts, QTN_PCI_TOPAZ_FW_NAME); + if (ret) + goto fw_load_exit; + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_START); + ret = qtnf_poll_state(&ts->bda->bda_bootstate, + QTN_BDA_FW_CONFIG, + QTN_FW_QLINK_TIMEOUT_MS); + if (ret) { + pr_err("FW bringup timed out\n"); + goto fw_load_exit; + } + + qtnf_set_state(&ts->bda->bda_bootstate, QTN_BDA_FW_RUN); + ret = qtnf_poll_state(&ts->bda->bda_bootstate, + QTN_BDA_FW_RUNNING, + QTN_FW_QLINK_TIMEOUT_MS); + if (ret) { + pr_err("card bringup timed out\n"); + goto 
fw_load_exit; + } + } + + pr_info("firmware is up and running\n"); + + ret = qtnf_post_init_ep(ts); + if (ret) + pr_err("FW runtime failure\n"); + +fw_load_exit: + qtnf_pcie_fw_boot_done(bus, ret ? false : true); + + if (ret == 0) { + qtnf_debugfs_add_entry(bus, "pkt_stats", qtnf_dbg_pkt_stats); + qtnf_debugfs_add_entry(bus, "irq_stats", qtnf_dbg_irq_stats); + } +} + +static void qtnf_reclaim_tasklet_fn(unsigned long data) +{ + struct qtnf_pcie_topaz_state *ts = (void *)data; + + qtnf_topaz_data_tx_reclaim(ts); +} + +static u64 qtnf_topaz_dma_mask_get(void) +{ + return DMA_BIT_MASK(32); +} + +static int qtnf_pcie_topaz_probe(struct qtnf_bus *bus, unsigned int tx_bd_num) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + struct pci_dev *pdev = ts->base.pdev; + struct qtnf_shm_ipc_int ipc_int; + unsigned long irqflags; + int ret; + + bus->bus_ops = &qtnf_pcie_topaz_bus_ops; + INIT_WORK(&bus->fw_work, qtnf_topaz_fw_work_handler); + ts->bda = ts->base.epmem_bar; + + /* assign host msi irq before card init */ + if (ts->base.msi_enabled) + irqflags = IRQF_NOBALANCING; + else + irqflags = IRQF_NOBALANCING | IRQF_SHARED; + + ret = devm_request_irq(&pdev->dev, pdev->irq, + &qtnf_pcie_topaz_interrupt, + irqflags, "qtnf_topaz_irq", (void *)bus); + if (ret) { + pr_err("failed to request pcie irq %d\n", pdev->irq); + return ret; + } + + disable_irq(pdev->irq); + + ret = qtnf_pre_init_ep(bus); + if (ret) { + pr_err("failed to init card\n"); + return ret; + } + + ret = qtnf_pcie_topaz_init_xfer(ts, tx_bd_num); + if (ret) { + pr_err("PCIE xfer init failed\n"); + return ret; + } + + tasklet_init(&ts->base.reclaim_tq, qtnf_reclaim_tasklet_fn, + (unsigned long)ts); + netif_napi_add(&bus->mux_dev, &bus->mux_napi, + qtnf_topaz_rx_poll, 10); + + ipc_int.fn = qtnf_topaz_ipc_gen_ep_int; + ipc_int.arg = ts; + qtnf_pcie_init_shm_ipc(&ts->base, &ts->bda->bda_shm_reg1, + &ts->bda->bda_shm_reg2, &ipc_int); + + return 0; +} + +static void qtnf_pcie_topaz_remove(struct qtnf_bus *bus) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + + qtnf_topaz_reset_ep(ts); + qtnf_topaz_free_xfer_buffers(ts); +} + +#ifdef CONFIG_PM_SLEEP +static int qtnf_pcie_topaz_suspend(struct qtnf_bus *bus) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + struct pci_dev *pdev = ts->base.pdev; + + writel((u32 __force)PCI_D3hot, ts->ep_pmstate); + dma_wmb(); + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ), + TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar)); + + pci_save_state(pdev); + pci_enable_wake(pdev, PCI_D3hot, 1); + pci_set_power_state(pdev, PCI_D3hot); + + return 0; +} + +static int qtnf_pcie_topaz_resume(struct qtnf_bus *bus) +{ + struct qtnf_pcie_topaz_state *ts = get_bus_priv(bus); + struct pci_dev *pdev = ts->base.pdev; + + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); + pci_enable_wake(pdev, PCI_D0, 0); + + writel((u32 __force)PCI_D0, ts->ep_pmstate); + dma_wmb(); + writel(TOPAZ_IPC_IRQ_WORD(TOPAZ_RC_PM_EP_IRQ), + TOPAZ_LH_IPC4_INT(ts->base.sysctl_bar)); + + return 0; +} +#endif + +struct qtnf_bus *qtnf_pcie_topaz_alloc(struct pci_dev *pdev) +{ + struct qtnf_bus *bus; + struct qtnf_pcie_topaz_state *ts; + + bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*ts), GFP_KERNEL); + if (!bus) + return NULL; + + ts = get_bus_priv(bus); + ts->base.probe_cb = qtnf_pcie_topaz_probe; + ts->base.remove_cb = qtnf_pcie_topaz_remove; + ts->base.dma_mask_get_cb = qtnf_topaz_dma_mask_get; +#ifdef CONFIG_PM_SLEEP + ts->base.resume_cb = qtnf_pcie_topaz_resume; + ts->base.suspend_cb = qtnf_pcie_topaz_suspend; 
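+ /* editorial note (not in the original patch): suspend_cb/resume_cb are + * populated only under CONFIG_PM_SLEEP; the common core registers + * qtnf_pcie_pm_ops under the same guard, so these callbacks are never + * dereferenced when unset. + */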
+#endif + + return bus; +} diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h new file mode 100644 index 000000000000..eb30e9d08de2 --- /dev/null +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_ipc.h @@ -0,0 +1,94 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018 Quantenna Communications */ + +#ifndef _QTN_FMAC_PCIE_IPC_H_ +#define _QTN_FMAC_PCIE_IPC_H_ + +#include <linux/types.h> + +#include "shm_ipc_defs.h" + +/* EP/RC status and flags */ +#define QTN_BDA_PCIE_INIT 0x01 +#define QTN_BDA_PCIE_RDY 0x02 +#define QTN_BDA_FW_LOAD_RDY 0x03 +#define QTN_BDA_FW_LOAD_DONE 0x04 +#define QTN_BDA_FW_START 0x05 +#define QTN_BDA_FW_RUN 0x06 +#define QTN_BDA_FW_HOST_RDY 0x07 +#define QTN_BDA_FW_TARGET_RDY 0x11 +#define QTN_BDA_FW_TARGET_BOOT 0x12 +#define QTN_BDA_FW_FLASH_BOOT 0x13 +#define QTN_BDA_FW_QLINK_DONE 0x14 +#define QTN_BDA_FW_HOST_LOAD 0x08 +#define QTN_BDA_FW_BLOCK_DONE 0x09 +#define QTN_BDA_FW_BLOCK_RDY 0x0A +#define QTN_BDA_FW_EP_RDY 0x0B +#define QTN_BDA_FW_BLOCK_END 0x0C +#define QTN_BDA_FW_CONFIG 0x0D +#define QTN_BDA_FW_RUNNING 0x0E +#define QTN_BDA_PCIE_FAIL 0x82 +#define QTN_BDA_FW_LOAD_FAIL 0x85 + +#define QTN_BDA_RCMODE BIT(1) +#define QTN_BDA_MSI BIT(2) +#define QTN_BDA_HOST_CALCMD BIT(3) +#define QTN_BDA_FLASH_PRESENT BIT(4) +#define QTN_BDA_FLASH_BOOT BIT(5) +#define QTN_BDA_XMIT_UBOOT BIT(6) +#define QTN_BDA_HOST_QLINK_DRV BIT(7) +#define QTN_BDA_TARGET_FBOOT_ERR BIT(8) +#define QTN_BDA_TARGET_FWLOAD_ERR BIT(9) +#define QTN_BDA_HOST_NOFW_ERR BIT(12) +#define QTN_BDA_HOST_MEMALLOC_ERR BIT(13) +#define QTN_BDA_HOST_MEMMAP_ERR BIT(14) +#define QTN_BDA_VER(x) (((x) >> 4) & 0xFF) +#define QTN_BDA_ERROR_MASK 0xFF00 + +/* registers and shmem address macros */ +#if BITS_PER_LONG == 64 +#define QTN_HOST_HI32(a) ((u32)(((u64)a) >> 32)) +#define QTN_HOST_LO32(a) ((u32)(((u64)a) & 0xffffffffUL)) +#define QTN_HOST_ADDR(h, l) ((((u64)h) << 32) | ((u64)l)) +#elif BITS_PER_LONG == 32 +#define QTN_HOST_HI32(a) 0 +#define QTN_HOST_LO32(a) ((u32)(((u32)a) & 0xffffffffUL)) +#define QTN_HOST_ADDR(h, l) ((u32)l) +#else +#error Unexpected BITS_PER_LONG value +#endif + +#define QTN_PCIE_BDA_VERSION 0x1001 + +#define PCIE_BDA_NAMELEN 32 + +#define QTN_PCIE_RC_TX_QUEUE_LEN 256 +#define QTN_PCIE_TX_VALID_PKT 0x80000000 +#define QTN_PCIE_PKT_LEN_MASK 0xffff + +#define QTN_BD_EMPTY ((uint32_t)0x00000001) +#define QTN_BD_WRAP ((uint32_t)0x00000002) +#define QTN_BD_MASK_LEN ((uint32_t)0xFFFF0000) +#define QTN_BD_MASK_OFFSET ((uint32_t)0x0000FF00) + +#define QTN_GET_LEN(x) (((x) >> 16) & 0xFFFF) +#define QTN_GET_OFFSET(x) (((x) >> 8) & 0xFF) +#define QTN_SET_LEN(len) (((len) & 0xFFFF) << 16) +#define QTN_SET_OFFSET(of) (((of) & 0xFF) << 8) + +#define RX_DONE_INTR_MSK ((0x1 << 6) - 1) + +#define PCIE_DMA_OFFSET_ERROR 0xFFFF +#define PCIE_DMA_OFFSET_ERROR_MASK 0xFFFF + +#define QTN_PCI_ENDIAN_DETECT_DATA 0x12345678 +#define QTN_PCI_ENDIAN_REVERSE_DATA 0x78563412 +#define QTN_PCI_ENDIAN_VALID_STATUS 0x3c3c3c3c +#define QTN_PCI_ENDIAN_INVALID_STATUS 0 +#define QTN_PCI_LITTLE_ENDIAN 0 +#define QTN_PCI_BIG_ENDIAN 0xffffffff + +#define NBLOCKS(size, blksize) \ + ((size) / (blksize) + (((size) % (blksize) > 0) ? 
1 : 0)) + +#endif /* _QTN_FMAC_PCIE_IPC_H_ */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h new file mode 100644 index 000000000000..4782e1ed3c2c --- /dev/null +++ b/drivers/net/wireless/quantenna/qtnfmac/pcie/topaz_pcie_regs.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +/* Copyright (c) 2018 Quantenna Communications */ + +#ifndef __TOPAZ_PCIE_H +#define __TOPAZ_PCIE_H + +/* Topaz PCIe DMA registers */ +#define PCIE_DMA_WR_INTR_STATUS(base) ((base) + 0x9bc) +#define PCIE_DMA_WR_INTR_MASK(base) ((base) + 0x9c4) +#define PCIE_DMA_WR_INTR_CLR(base) ((base) + 0x9c8) +#define PCIE_DMA_WR_ERR_STATUS(base) ((base) + 0x9cc) +#define PCIE_DMA_WR_DONE_IMWR_ADDR_LOW(base) ((base) + 0x9D0) +#define PCIE_DMA_WR_DONE_IMWR_ADDR_HIGH(base) ((base) + 0x9d4) + +#define PCIE_DMA_RD_INTR_STATUS(base) ((base) + 0x310) +#define PCIE_DMA_RD_INTR_MASK(base) ((base) + 0x319) +#define PCIE_DMA_RD_INTR_CLR(base) ((base) + 0x31c) +#define PCIE_DMA_RD_ERR_STATUS_LOW(base) ((base) + 0x324) +#define PCIE_DMA_RD_ERR_STATUS_HIGH(base) ((base) + 0x328) +#define PCIE_DMA_RD_DONE_IMWR_ADDR_LOW(base) ((base) + 0x33c) +#define PCIE_DMA_RD_DONE_IMWR_ADDR_HIGH(base) ((base) + 0x340) + +/* Topaz LHost IPC4 interrupt */ +#define TOPAZ_LH_IPC4_INT(base) ((base) + 0x13C) +#define TOPAZ_LH_IPC4_INT_MASK(base) ((base) + 0x140) + +#define TOPAZ_RC_TX_DONE_IRQ (0) +#define TOPAZ_RC_RST_EP_IRQ (1) +#define TOPAZ_RC_TX_STOP_IRQ (2) +#define TOPAZ_RC_RX_DONE_IRQ (3) +#define TOPAZ_RC_PM_EP_IRQ (4) + +/* Topaz LHost M2L interrupt */ +#define TOPAZ_CTL_M2L_INT(base) ((base) + 0x2C) +#define TOPAZ_CTL_M2L_INT_MASK(base) ((base) + 0x30) + +#define TOPAZ_RC_CTRL_IRQ (6) + +#define TOPAZ_IPC_IRQ_WORD(irq) (BIT(irq) | BIT(irq + 16)) + +/* PCIe legacy INTx */ +#define TOPAZ_PCIE_CFG0_OFFSET (0x6C) +#define TOPAZ_ASSERT_INTX BIT(9) + +#endif /* __TOPAZ_PCIE_H */ diff --git a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h index 1fe798a9a667..40295a511224 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h +++ b/drivers/net/wireless/quantenna/qtnfmac/qtn_hw_ids.h @@ -23,7 +23,7 @@ /* PCIE Device IDs */ -#define PCIE_DEVICE_ID_QTN_PEARL (0x0008) +#define PCIE_DEVICE_ID_QSR (0x0008) #define QTN_REG_SYS_CTRL_CSR 0x14 #define QTN_CHIP_ID_MASK 0xF0 @@ -35,6 +35,8 @@ /* FW names */ #define QTN_PCI_PEARL_FW_NAME "qtn/fmac_qsr10g.img" +#define QTN_PCI_TOPAZ_FW_NAME "qtn/fmac_qsr1000.img" +#define QTN_PCI_TOPAZ_BOOTLD_NAME "qtn/uboot_qsr1000.img" static inline unsigned int qtnf_chip_id_get(const void __iomem *regs_base) { diff --git a/drivers/net/wireless/quantenna/qtnfmac/util.c b/drivers/net/wireless/quantenna/qtnfmac/util.c index e745733ba417..3bc96b264769 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.c +++ b/drivers/net/wireless/quantenna/qtnfmac/util.c @@ -15,6 +15,7 @@ */ #include "util.h" +#include "qtn_hw_ids.h" void qtnf_sta_list_init(struct qtnf_sta_list *list) { @@ -116,3 +117,20 @@ void qtnf_sta_list_free(struct qtnf_sta_list *list) INIT_LIST_HEAD(&list->head); } + +const char *qtnf_chipid_to_string(unsigned long chip_id) +{ + switch (chip_id) { + case QTN_CHIP_ID_TOPAZ: + return "Topaz"; + case QTN_CHIP_ID_PEARL: + return "Pearl revA"; + case QTN_CHIP_ID_PEARL_B: + return "Pearl revB"; + case QTN_CHIP_ID_PEARL_C: + return "Pearl revC"; + default: + return "unknown"; + } +} +EXPORT_SYMBOL_GPL(qtnf_chipid_to_string); diff --git 
a/drivers/net/wireless/quantenna/qtnfmac/util.h b/drivers/net/wireless/quantenna/qtnfmac/util.h index 0d4d92b11540..b8744baac332 100644 --- a/drivers/net/wireless/quantenna/qtnfmac/util.h +++ b/drivers/net/wireless/quantenna/qtnfmac/util.h @@ -20,6 +20,8 @@ #include <linux/kernel.h> #include "core.h" +const char *qtnf_chipid_to_string(unsigned long chip_id); + void qtnf_sta_list_init(struct qtnf_sta_list *list); struct qtnf_sta_node *qtnf_sta_list_lookup(struct qtnf_sta_list *list, diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 0bc8b0249c57..49a732798395 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1302,7 +1302,7 @@ static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev, break; case 2: /* Failure, excessive retries */ __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); - /* Don't break, this is a failed frame! */ + /* Fall through - this is a failed frame! */ default: /* Failure */ __set_bit(TXDONE_FAILURE, &txdesc.flags); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 1ff5434798ec..e8e7bfe1ba9b 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -1430,7 +1430,7 @@ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, break; case 2: /* Failure, excessive retries */ __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); - /* Don't break, this is a failed frame! */ + /* Fall through - this is a failed frame! */ default: /* Failure */ __set_bit(TXDONE_FAILURE, &txdesc.flags); } diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index 9e7b8933d30c..0e95555aec62 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -2482,6 +2482,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev, switch (rt2x00dev->default_ant.tx_chain_num) { case 1: rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); + /* fall through */ case 2: rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1); break; @@ -2490,6 +2491,7 @@ static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev, switch (rt2x00dev->default_ant.rx_chain_num) { case 1: rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); + /* fall through */ case 2: rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1); break; @@ -9457,8 +9459,10 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) switch (rx_chains) { case 3: spec->ht.mcs.rx_mask[2] = 0xff; + /* fall through */ case 2: spec->ht.mcs.rx_mask[1] = 0xff; + /* fall through */ case 1: spec->ht.mcs.rx_mask[0] = 0xff; spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index cb0e1196f2c2..4c5de8fc8f12 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -2226,7 +2226,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) break; case 6: /* Failure, excessive retries */ __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); - /* Don't break, this is a failed frame! */ + /* Fall through - this is a failed frame! 
*/ default: /* Failure */ __set_bit(TXDONE_FAILURE, &txdesc.flags); } diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c index 08c607c031bc..33ad87528d9a 100644 --- a/drivers/net/wireless/ray_cs.c +++ b/drivers/net/wireless/ray_cs.c @@ -889,8 +889,10 @@ static int ray_hw_xmit(unsigned char *data, int len, struct net_device *dev, switch (ccsindex = get_free_tx_ccs(local)) { case ECCSBUSY: pr_debug("ray_hw_xmit tx_ccs table busy\n"); + /* fall through */ case ECCSFULL: pr_debug("ray_hw_xmit No free tx ccs\n"); + /* fall through */ case ECARDGONE: netif_stop_queue(dev); return XMIT_NO_CCS; diff --git a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c index cec37787ecf8..1a2ea8b47714 100644 --- a/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c +++ b/drivers/net/wireless/realtek/rtl818x/rtl8187/dev.c @@ -444,12 +444,13 @@ static int rtl8187_init_urbs(struct ieee80211_hw *dev) skb_queue_tail(&priv->rx_queue, skb); usb_anchor_urb(entry, &priv->anchored); ret = usb_submit_urb(entry, GFP_KERNEL); - usb_put_urb(entry); if (ret) { skb_unlink(skb, &priv->rx_queue); usb_unanchor_urb(entry); + usb_put_urb(entry); goto err; } + usb_put_urb(entry); } return ret; diff --git a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c index 56040b181cf5..2bd43057dda3 100644 --- a/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c +++ b/drivers/net/wireless/realtek/rtl8xxxu/rtl8xxxu_core.c @@ -1153,6 +1153,7 @@ void rtl8xxxu_gen1_config_channel(struct ieee80211_hw *hw) switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: ht = false; + /* fall through */ case NL80211_CHAN_WIDTH_20: opmode |= BW_OPMODE_20MHZ; rtl8xxxu_write8(priv, REG_BW_OPMODE, opmode); @@ -1280,6 +1281,7 @@ void rtl8xxxu_gen2_config_channel(struct ieee80211_hw *hw) switch (hw->conf.chandef.width) { case NL80211_CHAN_WIDTH_20_NOHT: ht = false; + /* fall through */ case NL80211_CHAN_WIDTH_20: rf_mode_bw |= WMAC_TRXPTCL_CTL_BW_20; subchannel = 0; @@ -1748,9 +1750,11 @@ static int rtl8xxxu_identify_chip(struct rtl8xxxu_priv *priv) case 3: priv->ep_tx_low_queue = 1; priv->ep_tx_count++; + /* fall through */ case 2: priv->ep_tx_normal_queue = 1; priv->ep_tx_count++; + /* fall through */ case 1: priv->ep_tx_high_queue = 1; priv->ep_tx_count++; @@ -5688,6 +5692,7 @@ static int rtl8xxxu_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, break; case WLAN_CIPHER_SUITE_TKIP: key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; + break; default: return -EOPNOTSUPP; } diff --git a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c index 6fbf8845a2ab..d748aab66aa2 100644 --- a/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c +++ b/drivers/net/wireless/realtek/rtlwifi/btcoexist/halbtcoutsrc.c @@ -292,11 +292,9 @@ bool halbtc_send_bt_mp_operation(struct btc_coexist *btcoexist, u8 op_code, static void halbtc_leave_lps(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv; - struct rtl_ps_ctl *ppsc; bool ap_enable = false; rtlpriv = btcoexist->adapter; - ppsc = rtl_psc(rtlpriv); btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &ap_enable); @@ -315,11 +313,9 @@ static void halbtc_leave_lps(struct btc_coexist *btcoexist) static void halbtc_enter_lps(struct btc_coexist *btcoexist) { struct rtl_priv *rtlpriv; - struct rtl_ps_ctl *ppsc; bool ap_enable = false; rtlpriv = btcoexist->adapter; - ppsc = rtl_psc(rtlpriv); 
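+ /* editorial note (not in the original patch): fetch the WiFi AP-mode + * state first; subsequent LPS handling depends on it. + */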
btcoexist->btc_get(btcoexist, BTC_GET_BL_WIFI_AP_MODE_ENABLE, &ap_enable); diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c index 4c1f8b08fc10..14dcb0816bc0 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8188ee/trx.c @@ -29,7 +29,6 @@ #include "../stats.h" #include "reg.h" #include "def.h" -#include "phy.h" #include "trx.h" #include "led.h" #include "dm.h" diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c index 85cedd083d2b..75bfa9dfef4a 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8192de/fw.c @@ -173,7 +173,7 @@ static int _rtl92d_fw_init(struct ieee80211_hw *hw) rtl_read_byte(rtlpriv, FW_MAC1_READY)); } RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG, - "Polling FW ready fail!! REG_MCUFWDL:0x%08ul\n", + "Polling FW ready fail!! REG_MCUFWDL:0x%08x\n", rtl_read_dword(rtlpriv, REG_MCUFWDL)); return -1; } diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c index 5cf29f5a4b54..3f3327878b51 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/phy.c @@ -509,13 +509,10 @@ bool rtl8723e_phy_config_rf_with_headerfile(struct ieee80211_hw *hw, int i; bool rtstatus = true; u32 *radioa_array_table; - u32 *radiob_array_table; - u16 radioa_arraylen, radiob_arraylen; + u16 radioa_arraylen; radioa_arraylen = RTL8723ERADIOA_1TARRAYLENGTH; radioa_array_table = RTL8723E_RADIOA_1TARRAY; - radiob_arraylen = RTL8723E_RADIOB_1TARRAYLENGTH; - radiob_array_table = RTL8723E_RADIOB_1TARRAY; rtstatus = true; diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c index 61e86045f15c..1bbee0bfac23 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.c @@ -475,10 +475,6 @@ u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH] = { 0x000, 0x00030159, }; -u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH] = { - 0x0, -}; - u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH] = { 0x420, 0x00000080, 0x423, 0x00000000, diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h index 57a548ceba7d..a044f3c456fa 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8723ae/table.h @@ -36,8 +36,6 @@ extern u32 RTL8723EPHY_REG_1TARRAY[RTL8723E_PHY_REG_1TARRAY_LENGTH]; extern u32 RTL8723EPHY_REG_ARRAY_PG[RTL8723E_PHY_REG_ARRAY_PGLENGTH]; #define RTL8723ERADIOA_1TARRAYLENGTH 282 extern u32 RTL8723E_RADIOA_1TARRAY[RTL8723ERADIOA_1TARRAYLENGTH]; -#define RTL8723E_RADIOB_1TARRAYLENGTH 1 -extern u32 RTL8723E_RADIOB_1TARRAY[RTL8723E_RADIOB_1TARRAYLENGTH]; #define RTL8723E_MACARRAYLENGTH 172 extern u32 RTL8723EMAC_ARRAY[RTL8723E_MACARRAYLENGTH]; #define RTL8723E_AGCTAB_1TARRAYLENGTH 320 diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c index 176deb2b5386..a75451c246fd 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/phy.c @@ -394,6 +394,7 @@ static void _rtl8812ae_phy_set_rfe_reg_24g(struct ieee80211_hw *hw) rtl_set_bbreg(hw, RB_RFE_INV, 
BMASKRFEINV, 0x000); break; } + /* fall through */ case 0: case 2: default: diff --git a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c index d7960dd5bf1a..0f2b7c619918 100644 --- a/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c +++ b/drivers/net/wireless/realtek/rtlwifi/rtl8821ae/trx.c @@ -29,7 +29,6 @@ #include "../stats.h" #include "reg.h" #include "def.h" -#include "phy.h" #include "trx.h" #include "led.h" #include "dm.h" @@ -307,14 +306,12 @@ static void translate_rx_signal_stuff(struct ieee80211_hw *hw, u8 *praddr; u8 *psaddr; __le16 fc; - u16 type; bool packet_matchbssid, packet_toself, packet_beacon; tmp_buf = skb->data + pstatus->rx_drvinfo_size + pstatus->rx_bufshift; hdr = (struct ieee80211_hdr *)tmp_buf; fc = hdr->frame_control; - type = WLAN_FC_GET_TYPE(hdr->frame_control); praddr = hdr->addr1; psaddr = ieee80211_get_SA(hdr); ether_addr_copy(pstatus->psaddr, psaddr); diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c index 612c211e21a1..449f6d23c5e3 100644 --- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c +++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c @@ -210,7 +210,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) } /* This tells SDIO FIFO when to start read to host */ - rsi_dbg(INIT_ZONE, "%s: Initialzing SDIO read start level\n", __func__); + rsi_dbg(INIT_ZONE, "%s: Initializing SDIO read start level\n", __func__); byte = 0x24; status = rsi_sdio_write_register(adapter, @@ -223,7 +223,7 @@ int rsi_init_sdio_slave_regs(struct rsi_hw *adapter) return -1; } - rsi_dbg(INIT_ZONE, "%s: Initialzing FIFO ctrl registers\n", __func__); + rsi_dbg(INIT_ZONE, "%s: Initializing FIFO ctrl registers\n", __func__); byte = (128 - 32); status = rsi_sdio_write_register(adapter, diff --git a/drivers/net/wireless/st/cw1200/debug.c b/drivers/net/wireless/st/cw1200/debug.c index 295cb1a29f25..2231ba08bc1f 100644 --- a/drivers/net/wireless/st/cw1200/debug.c +++ b/drivers/net/wireless/st/cw1200/debug.c @@ -289,19 +289,7 @@ static int cw1200_status_show(struct seq_file *seq, void *v) return 0; } -static int cw1200_status_open(struct inode *inode, struct file *file) -{ - return single_open(file, &cw1200_status_show, - inode->i_private); -} - -static const struct file_operations fops_status = { - .open = cw1200_status_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(cw1200_status); static int cw1200_counters_show(struct seq_file *seq, void *v) { @@ -345,19 +333,7 @@ static int cw1200_counters_show(struct seq_file *seq, void *v) return 0; } -static int cw1200_counters_open(struct inode *inode, struct file *file) -{ - return single_open(file, &cw1200_counters_show, - inode->i_private); -} - -static const struct file_operations fops_counters = { - .open = cw1200_counters_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, - .owner = THIS_MODULE, -}; +DEFINE_SHOW_ATTRIBUTE(cw1200_counters); static ssize_t cw1200_wsm_dumps(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) @@ -399,11 +375,11 @@ int cw1200_debug_init(struct cw1200_common *priv) goto err; if (!debugfs_create_file("status", 0400, d->debugfs_phy, - priv, &fops_status)) + priv, &cw1200_status_fops)) goto err; if (!debugfs_create_file("counters", 0400, d->debugfs_phy, - priv, &fops_counters)) + priv, &cw1200_counters_fops)) goto err; if (!debugfs_create_file("wsm_dumps", 0200, 
d->debugfs_phy, diff --git a/drivers/net/wireless/st/cw1200/scan.c b/drivers/net/wireless/st/cw1200/scan.c index 67213f11acbd..0a9eac93dd01 100644 --- a/drivers/net/wireless/st/cw1200/scan.c +++ b/drivers/net/wireless/st/cw1200/scan.c @@ -78,6 +78,10 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS) return -EINVAL; + /* will be unlocked in cw1200_scan_work() */ + down(&priv->scan.lock); + mutex_lock(&priv->conf_mutex); + frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0, req->ie_len); if (!frame.skb) @@ -86,19 +90,15 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, if (req->ie_len) skb_put_data(frame.skb, req->ie, req->ie_len); - /* will be unlocked in cw1200_scan_work() */ - down(&priv->scan.lock); - mutex_lock(&priv->conf_mutex); - ret = wsm_set_template_frame(priv, &frame); if (!ret) { /* Host want to be the probe responder. */ ret = wsm_set_probe_responder(priv, true); } if (ret) { + dev_kfree_skb(frame.skb); mutex_unlock(&priv->conf_mutex); up(&priv->scan.lock); - dev_kfree_skb(frame.skb); return ret; } @@ -120,10 +120,9 @@ int cw1200_hw_scan(struct ieee80211_hw *hw, ++priv->scan.n_ssids; } - mutex_unlock(&priv->conf_mutex); - if (frame.skb) dev_kfree_skb(frame.skb); + mutex_unlock(&priv->conf_mutex); queue_work(priv->workqueue, &priv->scan.work); return 0; } diff --git a/drivers/net/wireless/st/cw1200/sta.c b/drivers/net/wireless/st/cw1200/sta.c index 38678e9a0562..8dae92a79fe1 100644 --- a/drivers/net/wireless/st/cw1200/sta.c +++ b/drivers/net/wireless/st/cw1200/sta.c @@ -1123,7 +1123,7 @@ int cw1200_setup_mac(struct cw1200_common *priv) * * NOTE2: RSSI based reports have been switched to RCPI, since * FW has a bug and RSSI reported values are not stable, - * what can leads to signal level oscilations in user-end applications + * what can lead to signal level oscilations in user-end applications */ struct wsm_rcpi_rssi_threshold threshold = { .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE | diff --git a/drivers/net/wireless/ti/wlcore/vendor_cmd.c b/drivers/net/wireless/ti/wlcore/vendor_cmd.c index dbe78d8491ef..7f34ec077ee5 100644 --- a/drivers/net/wireless/ti/wlcore/vendor_cmd.c +++ b/drivers/net/wireless/ti/wlcore/vendor_cmd.c @@ -70,7 +70,7 @@ wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy, out: mutex_unlock(&wl->mutex); - return 0; + return ret; } static int diff --git a/drivers/net/wireless/virt_wifi.c b/drivers/net/wireless/virt_wifi.c new file mode 100644 index 000000000000..64b218699656 --- /dev/null +++ b/drivers/net/wireless/virt_wifi.c @@ -0,0 +1,632 @@ +// SPDX-License-Identifier: GPL-2.0 +/* drivers/net/wireless/virt_wifi.c + * + * A fake implementation of cfg80211_ops that can be tacked on to an ethernet + * net_device to make it appear as a wireless connection. + * + * Copyright (C) 2018 Google, Inc. 
+ *
+ * Author: schuffelen@google.com
+ */
+
+#include <net/cfg80211.h>
+#include <net/rtnetlink.h>
+#include <linux/etherdevice.h>
+#include <linux/module.h>
+
+static struct wiphy *common_wiphy;
+
+struct virt_wifi_wiphy_priv {
+	struct delayed_work scan_result;
+	struct cfg80211_scan_request *scan_request;
+	bool being_deleted;
+};
+
+static struct ieee80211_channel channel_2ghz = {
+	.band = NL80211_BAND_2GHZ,
+	.center_freq = 2432,
+	.hw_value = 2432,
+	.max_power = 20,
+};
+
+static struct ieee80211_rate bitrates_2ghz[] = {
+	{ .bitrate = 10 },
+	{ .bitrate = 20 },
+	{ .bitrate = 55 },
+	{ .bitrate = 110 },
+	{ .bitrate = 60 },
+	{ .bitrate = 120 },
+	{ .bitrate = 240 },
+};
+
+static struct ieee80211_supported_band band_2ghz = {
+	.channels = &channel_2ghz,
+	.bitrates = bitrates_2ghz,
+	.band = NL80211_BAND_2GHZ,
+	.n_channels = 1,
+	.n_bitrates = ARRAY_SIZE(bitrates_2ghz),
+	.ht_cap = {
+		.ht_supported = true,
+		.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		       IEEE80211_HT_CAP_GRN_FLD |
+		       IEEE80211_HT_CAP_SGI_20 |
+		       IEEE80211_HT_CAP_SGI_40 |
+		       IEEE80211_HT_CAP_DSSSCCK40,
+		.ampdu_factor = 0x3,
+		.ampdu_density = 0x6,
+		.mcs = {
+			.rx_mask = {0xff, 0xff},
+			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+		},
+	},
+};
+
+static struct ieee80211_channel channel_5ghz = {
+	.band = NL80211_BAND_5GHZ,
+	.center_freq = 5240,
+	.hw_value = 5240,
+	.max_power = 20,
+};
+
+static struct ieee80211_rate bitrates_5ghz[] = {
+	{ .bitrate = 60 },
+	{ .bitrate = 120 },
+	{ .bitrate = 240 },
+};
+
+#define RX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 14)
+
+#define TX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \
+		    IEEE80211_VHT_MCS_SUPPORT_0_9 << 14)
+
+static struct ieee80211_supported_band band_5ghz = {
+	.channels = &channel_5ghz,
+	.bitrates = bitrates_5ghz,
+	.band = NL80211_BAND_5GHZ,
+	.n_channels = 1,
+	.n_bitrates = ARRAY_SIZE(bitrates_5ghz),
+	.ht_cap = {
+		.ht_supported = true,
+		.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
+		       IEEE80211_HT_CAP_GRN_FLD |
+		       IEEE80211_HT_CAP_SGI_20 |
+		       IEEE80211_HT_CAP_SGI_40 |
+		       IEEE80211_HT_CAP_DSSSCCK40,
+		.ampdu_factor = 0x3,
+		.ampdu_density = 0x6,
+		.mcs = {
+			.rx_mask = {0xff, 0xff},
+			.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
+		},
+	},
+	.vht_cap = {
+		.vht_supported = true,
+		.cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 |
+		       IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ |
+		       IEEE80211_VHT_CAP_RXLDPC |
+		       IEEE80211_VHT_CAP_SHORT_GI_80 |
+		       IEEE80211_VHT_CAP_SHORT_GI_160 |
+		       IEEE80211_VHT_CAP_TXSTBC |
+		       IEEE80211_VHT_CAP_RXSTBC_1 |
+		       IEEE80211_VHT_CAP_RXSTBC_2 |
+		       IEEE80211_VHT_CAP_RXSTBC_3 |
+		       IEEE80211_VHT_CAP_RXSTBC_4 |
+		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK,
+		.vht_mcs = {
+			.rx_mcs_map = cpu_to_le16(RX_MCS_MAP),
+			.tx_mcs_map = cpu_to_le16(TX_MCS_MAP),
+		}
+	},
+};
+
+/* Assigned at module init. Guaranteed locally-administered and unicast.
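+ * (eth_random_addr() guarantees this by clearing the multicast bit and
+ * setting the locally-administered bit of the first octet.)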
*/ +static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {}; + +/* Called with the rtnl lock held. */ +static int virt_wifi_scan(struct wiphy *wiphy, + struct cfg80211_scan_request *request) +{ + struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy); + + wiphy_debug(wiphy, "scan\n"); + + if (priv->scan_request || priv->being_deleted) + return -EBUSY; + + priv->scan_request = request; + schedule_delayed_work(&priv->scan_result, HZ * 2); + + return 0; +} + +/* Acquires and releases the rdev BSS lock. */ +static void virt_wifi_scan_result(struct work_struct *work) +{ + struct { + u8 tag; + u8 len; + u8 ssid[8]; + } __packed ssid = { + .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi", + }; + struct cfg80211_bss *informed_bss; + struct virt_wifi_wiphy_priv *priv = + container_of(work, struct virt_wifi_wiphy_priv, + scan_result.work); + struct wiphy *wiphy = priv_to_wiphy(priv); + struct cfg80211_scan_info scan_info = { .aborted = false }; + + informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, + CFG80211_BSS_FTYPE_PRESP, + fake_router_bssid, + ktime_get_boot_ns(), + WLAN_CAPABILITY_ESS, 0, + (void *)&ssid, sizeof(ssid), + DBM_TO_MBM(-50), GFP_KERNEL); + cfg80211_put_bss(wiphy, informed_bss); + + /* Schedules work which acquires and releases the rtnl lock. */ + cfg80211_scan_done(priv->scan_request, &scan_info); + priv->scan_request = NULL; +} + +/* May acquire and release the rdev BSS lock. */ +static void virt_wifi_cancel_scan(struct wiphy *wiphy) +{ + struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy); + + cancel_delayed_work_sync(&priv->scan_result); + /* Clean up dangling callbacks if necessary. */ + if (priv->scan_request) { + struct cfg80211_scan_info scan_info = { .aborted = true }; + /* Schedules work which acquires and releases the rtnl lock. */ + cfg80211_scan_done(priv->scan_request, &scan_info); + priv->scan_request = NULL; + } +} + +struct virt_wifi_netdev_priv { + struct delayed_work connect; + struct net_device *lowerdev; + struct net_device *upperdev; + u32 tx_packets; + u32 tx_failed; + u8 connect_requested_bss[ETH_ALEN]; + bool is_up; + bool is_connected; + bool being_deleted; +}; + +/* Called with the rtnl lock held. */ +static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev, + struct cfg80211_connect_params *sme) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(netdev); + bool could_schedule; + + if (priv->being_deleted || !priv->is_up) + return -EBUSY; + + could_schedule = schedule_delayed_work(&priv->connect, HZ * 2); + if (!could_schedule) + return -EBUSY; + + if (sme->bssid) + ether_addr_copy(priv->connect_requested_bss, sme->bssid); + else + eth_zero_addr(priv->connect_requested_bss); + + wiphy_debug(wiphy, "connect\n"); + + return 0; +} + +/* Acquires and releases the rdev event lock. */ +static void virt_wifi_connect_complete(struct work_struct *work) +{ + struct virt_wifi_netdev_priv *priv = + container_of(work, struct virt_wifi_netdev_priv, connect.work); + u8 *requested_bss = priv->connect_requested_bss; + bool has_addr = !is_zero_ether_addr(requested_bss); + bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid); + u16 status = WLAN_STATUS_SUCCESS; + + if (!priv->is_up || (has_addr && !right_addr)) + status = WLAN_STATUS_UNSPECIFIED_FAILURE; + else + priv->is_connected = true; + + /* Schedules an event that acquires the rtnl lock. 
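+	 * (cfg80211_connect_result() only queues the result on the rdev
+	 * event list; the notification itself runs later from the rdev
+	 * event work.)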
*/ + cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0, + status, GFP_KERNEL); + netif_carrier_on(priv->upperdev); +} + +/* May acquire and release the rdev event lock. */ +static void virt_wifi_cancel_connect(struct net_device *netdev) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(netdev); + + /* If there is work pending, clean up dangling callbacks. */ + if (cancel_delayed_work_sync(&priv->connect)) { + /* Schedules an event that acquires the rtnl lock. */ + cfg80211_connect_result(priv->upperdev, + priv->connect_requested_bss, NULL, 0, + NULL, 0, + WLAN_STATUS_UNSPECIFIED_FAILURE, + GFP_KERNEL); + } +} + +/* Called with the rtnl lock held. Acquires the rdev event lock. */ +static int virt_wifi_disconnect(struct wiphy *wiphy, struct net_device *netdev, + u16 reason_code) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(netdev); + + if (priv->being_deleted) + return -EBUSY; + + wiphy_debug(wiphy, "disconnect\n"); + virt_wifi_cancel_connect(netdev); + + cfg80211_disconnected(netdev, reason_code, NULL, 0, true, GFP_KERNEL); + priv->is_connected = false; + netif_carrier_off(netdev); + + return 0; +} + +/* Called with the rtnl lock held. */ +static int virt_wifi_get_station(struct wiphy *wiphy, struct net_device *dev, + const u8 *mac, struct station_info *sinfo) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + + wiphy_debug(wiphy, "get_station\n"); + + if (!priv->is_connected || !ether_addr_equal(mac, fake_router_bssid)) + return -ENOENT; + + sinfo->filled = BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | + BIT_ULL(NL80211_STA_INFO_TX_FAILED) | + BIT_ULL(NL80211_STA_INFO_SIGNAL) | + BIT_ULL(NL80211_STA_INFO_TX_BITRATE); + sinfo->tx_packets = priv->tx_packets; + sinfo->tx_failed = priv->tx_failed; + /* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_ */ + sinfo->signal = -50; + sinfo->txrate = (struct rate_info) { + .legacy = 10, /* units are 100kbit/s */ + }; + return 0; +} + +/* Called with the rtnl lock held. */ +static int virt_wifi_dump_station(struct wiphy *wiphy, struct net_device *dev, + int idx, u8 *mac, struct station_info *sinfo) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + + wiphy_debug(wiphy, "dump_station\n"); + + if (idx != 0 || !priv->is_connected) + return -ENOENT; + + ether_addr_copy(mac, fake_router_bssid); + return virt_wifi_get_station(wiphy, dev, fake_router_bssid, sinfo); +} + +static const struct cfg80211_ops virt_wifi_cfg80211_ops = { + .scan = virt_wifi_scan, + + .connect = virt_wifi_connect, + .disconnect = virt_wifi_disconnect, + + .get_station = virt_wifi_get_station, + .dump_station = virt_wifi_dump_station, +}; + +/* Acquires and releases the rtnl lock. 
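+ * (wiphy_register() takes the rtnl itself, so the caller must not already
+ * hold it here.)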
*/ +static struct wiphy *virt_wifi_make_wiphy(void) +{ + struct wiphy *wiphy; + struct virt_wifi_wiphy_priv *priv; + int err; + + wiphy = wiphy_new(&virt_wifi_cfg80211_ops, sizeof(*priv)); + + if (!wiphy) + return NULL; + + wiphy->max_scan_ssids = 4; + wiphy->max_scan_ie_len = 1000; + wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; + + wiphy->bands[NL80211_BAND_2GHZ] = &band_2ghz; + wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz; + wiphy->bands[NL80211_BAND_60GHZ] = NULL; + + wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED; + wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); + + priv = wiphy_priv(wiphy); + priv->being_deleted = false; + priv->scan_request = NULL; + INIT_DELAYED_WORK(&priv->scan_result, virt_wifi_scan_result); + + err = wiphy_register(wiphy); + if (err < 0) { + wiphy_free(wiphy); + return NULL; + } + + return wiphy; +} + +/* Acquires and releases the rtnl lock. */ +static void virt_wifi_destroy_wiphy(struct wiphy *wiphy) +{ + struct virt_wifi_wiphy_priv *priv; + + WARN(!wiphy, "%s called with null wiphy", __func__); + if (!wiphy) + return; + + priv = wiphy_priv(wiphy); + priv->being_deleted = true; + virt_wifi_cancel_scan(wiphy); + + if (wiphy->registered) + wiphy_unregister(wiphy); + wiphy_free(wiphy); +} + +/* Enters and exits a RCU-bh critical section. */ +static netdev_tx_t virt_wifi_start_xmit(struct sk_buff *skb, + struct net_device *dev) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + + priv->tx_packets++; + if (!priv->is_connected) { + priv->tx_failed++; + return NET_XMIT_DROP; + } + + skb->dev = priv->lowerdev; + return dev_queue_xmit(skb); +} + +/* Called with rtnl lock held. */ +static int virt_wifi_net_device_open(struct net_device *dev) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + + priv->is_up = true; + return 0; +} + +/* Called with rtnl lock held. */ +static int virt_wifi_net_device_stop(struct net_device *dev) +{ + struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev); + struct virt_wifi_wiphy_priv *w_priv; + + n_priv->is_up = false; + + if (!dev->ieee80211_ptr) + return 0; + w_priv = wiphy_priv(dev->ieee80211_ptr->wiphy); + + virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy); + virt_wifi_cancel_connect(dev); + netif_carrier_off(dev); + + return 0; +} + +static const struct net_device_ops virt_wifi_ops = { + .ndo_start_xmit = virt_wifi_start_xmit, + .ndo_open = virt_wifi_net_device_open, + .ndo_stop = virt_wifi_net_device_stop, +}; + +/* Invoked as part of rtnl lock release. */ +static void virt_wifi_net_device_destructor(struct net_device *dev) +{ + /* Delayed past dellink to allow nl80211 to react to the device being + * deleted. + */ + kfree(dev->ieee80211_ptr); + dev->ieee80211_ptr = NULL; + free_netdev(dev); +} + +/* No lock interaction. */ +static void virt_wifi_setup(struct net_device *dev) +{ + ether_setup(dev); + dev->netdev_ops = &virt_wifi_ops; + dev->priv_destructor = virt_wifi_net_device_destructor; +} + +/* Called in a RCU read critical section from netif_receive_skb */ +static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb) +{ + struct sk_buff *skb = *pskb; + struct virt_wifi_netdev_priv *priv = + rcu_dereference(skb->dev->rx_handler_data); + + if (!priv->is_connected) + return RX_HANDLER_PASS; + + /* GFP_ATOMIC because this is a packet interrupt handler. 
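+	 * (rx_handlers are invoked from netif_receive_skb() in softirq
+	 * context, so only atomic allocations are safe here.)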
*/ + skb = skb_share_check(skb, GFP_ATOMIC); + if (!skb) { + dev_err(&priv->upperdev->dev, "can't skb_share_check\n"); + return RX_HANDLER_CONSUMED; + } + + *pskb = skb; + skb->dev = priv->upperdev; + skb->pkt_type = PACKET_HOST; + return RX_HANDLER_ANOTHER; +} + +/* Called with rtnl lock held. */ +static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, + struct nlattr *tb[], struct nlattr *data[], + struct netlink_ext_ack *extack) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + int err; + + if (!tb[IFLA_LINK]) + return -EINVAL; + + netif_carrier_off(dev); + + priv->upperdev = dev; + priv->lowerdev = __dev_get_by_index(src_net, + nla_get_u32(tb[IFLA_LINK])); + + if (!priv->lowerdev) + return -ENODEV; + if (!tb[IFLA_MTU]) + dev->mtu = priv->lowerdev->mtu; + else if (dev->mtu > priv->lowerdev->mtu) + return -EINVAL; + + err = netdev_rx_handler_register(priv->lowerdev, virt_wifi_rx_handler, + priv); + if (err) { + dev_err(&priv->lowerdev->dev, + "can't netdev_rx_handler_register: %d\n", err); + return err; + } + + eth_hw_addr_inherit(dev, priv->lowerdev); + netif_stacked_transfer_operstate(priv->lowerdev, dev); + + SET_NETDEV_DEV(dev, &priv->lowerdev->dev); + dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); + + if (!dev->ieee80211_ptr) + goto remove_handler; + + dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; + dev->ieee80211_ptr->wiphy = common_wiphy; + + err = register_netdevice(dev); + if (err) { + dev_err(&priv->lowerdev->dev, "can't register_netdevice: %d\n", + err); + goto free_wireless_dev; + } + + err = netdev_upper_dev_link(priv->lowerdev, dev, extack); + if (err) { + dev_err(&priv->lowerdev->dev, "can't netdev_upper_dev_link: %d\n", + err); + goto unregister_netdev; + } + + priv->being_deleted = false; + priv->is_connected = false; + priv->is_up = false; + INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete); + + return 0; +unregister_netdev: + unregister_netdevice(dev); +free_wireless_dev: + kfree(dev->ieee80211_ptr); + dev->ieee80211_ptr = NULL; +remove_handler: + netdev_rx_handler_unregister(priv->lowerdev); + + return err; +} + +/* Called with rtnl lock held. */ +static void virt_wifi_dellink(struct net_device *dev, + struct list_head *head) +{ + struct virt_wifi_netdev_priv *priv = netdev_priv(dev); + + if (dev->ieee80211_ptr) + virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy); + + priv->being_deleted = true; + virt_wifi_cancel_connect(dev); + netif_carrier_off(dev); + + netdev_rx_handler_unregister(priv->lowerdev); + netdev_upper_dev_unlink(priv->lowerdev, dev); + + unregister_netdevice_queue(dev, head); + + /* Deleting the wiphy is handled in the module destructor. */ +} + +static struct rtnl_link_ops virt_wifi_link_ops = { + .kind = "virt_wifi", + .setup = virt_wifi_setup, + .newlink = virt_wifi_newlink, + .dellink = virt_wifi_dellink, + .priv_size = sizeof(struct virt_wifi_netdev_priv), +}; + +/* Acquires and releases the rtnl lock. */ +static int __init virt_wifi_init_module(void) +{ + int err; + + /* Guaranteed to be locallly-administered and not multicast. */ + eth_random_addr(fake_router_bssid); + + common_wiphy = virt_wifi_make_wiphy(); + if (!common_wiphy) + return -ENOMEM; + + err = rtnl_link_register(&virt_wifi_link_ops); + if (err) + virt_wifi_destroy_wiphy(common_wiphy); + + return err; +} + +/* Acquires and releases the rtnl lock. */ +static void __exit virt_wifi_cleanup_module(void) +{ + /* Will delete any devices that depend on the wiphy. 
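+	 * (rtnl_link_unregister() calls ->dellink() on every remaining
+	 * virt_wifi device, across all network namespaces, before it
+	 * returns.)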
+	 */
+	rtnl_link_unregister(&virt_wifi_link_ops);
+	virt_wifi_destroy_wiphy(common_wiphy);
+}
+
+module_init(virt_wifi_init_module);
+module_exit(virt_wifi_cleanup_module);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Cody Schuffelen <schuffelen@google.com>");
+MODULE_DESCRIPTION("Driver for a wireless wrapper of ethernet devices");
+MODULE_ALIAS_RTNL_LINK("virt_wifi");
diff --git a/drivers/net/wireless/zydas/zd1201.c b/drivers/net/wireless/zydas/zd1201.c
index 253403899fe9..22c70f1f568c 100644
--- a/drivers/net/wireless/zydas/zd1201.c
+++ b/drivers/net/wireless/zydas/zd1201.c
@@ -969,6 +969,7 @@ static int zd1201_set_mode(struct net_device *dev,
 		 */
 		zd1201_join(zd, "\0-*#\0", 5);
 		/* Put port in pIBSS */
+		/* Fall through */
 	case 8: /* No pseudo-IBSS in wireless extensions (yet) */
 		porttype = ZD1201_PORTTYPE_PSEUDOIBSS;
 		break;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index fe1d52247bbe..2625740bdc4a 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -186,7 +186,7 @@ static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
 	.write = xenvif_write_io_ring,
 };
 
-static int xenvif_read_ctrl(struct seq_file *m, void *v)
+static int xenvif_ctrl_show(struct seq_file *m, void *v)
 {
 	struct xenvif *vif = m->private;
 
@@ -194,19 +194,7 @@ static int xenvif_read_ctrl(struct seq_file *m, void *v)
 
 	return 0;
 }
 
-static int xenvif_ctrl_open(struct inode *inode, struct file *filp)
-{
-	return single_open(filp, xenvif_read_ctrl, inode->i_private);
-}
-
-static const struct file_operations xenvif_dbg_ctrl_ops_fops = {
-	.owner = THIS_MODULE,
-	.open = xenvif_ctrl_open,
-	.read = seq_read,
-	.llseek = seq_lseek,
-	.release = single_release,
-};
+DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl);
 
 static void xenvif_debugfs_addif(struct xenvif *vif)
 {
@@ -238,7 +226,7 @@ static void xenvif_debugfs_addif(struct xenvif *vif)
 						  0400,
 						  vif->xenvif_dbg_root,
 						  vif,
-						  &xenvif_dbg_ctrl_ops_fops);
+						  &xenvif_ctrl_fops);
 		if (IS_ERR_OR_NULL(pfile))
 			pr_warn("Creation of ctrl file returned %ld!\n",
 				PTR_ERR(pfile));
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5b97cc946d70..c914c24f880b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -337,8 +337,6 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
 		return;
 	}
 
-	wmb();	/* barrier so backend seens requests */
-
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&queue->rx, notify);
 	if (notify)
 		notify_remote_via_irq(queue->rx_irq);
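
A note on the debugfs conversions above (bonding/bond_debugfs.c,
st/cw1200/debug.c and xen-netback/xenbus.c): DEFINE_SHOW_ATTRIBUTE() from
<linux/seq_file.h> generates exactly the single_open() boilerplate those
hunks delete. As of this kernel series the macro expands to approximately
the following; the only naming requirement is a show callback called
<name>_show, from which it derives <name>_open and <name>_fops:

#define DEFINE_SHOW_ATTRIBUTE(__name)					\
static int __name ## _open(struct inode *inode, struct file *file)	\
{									\
	return single_open(file, __name ## _show, inode->i_private);	\
}									\
									\
static const struct file_operations __name ## _fops = {		\
	.owner		= THIS_MODULE,					\
	.open		= __name ## _open,				\
	.read		= seq_read,					\
	.llseek		= seq_lseek,					\
	.release	= single_release,				\
}

So DEFINE_SHOW_ATTRIBUTE(xenvif_ctrl), for example, yields the
xenvif_ctrl_fops referenced by the debugfs_create_file() call in the
xenbus.c hunk above.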